Merge ath-next from ath.git

Major changes:

ath10k

* add board 2 API support for automatically choosing correct board file
* data path optimisations
* disable PCI power save for QCA988X and QCA99X0 due to interop reasons

wil6210

* BlockAckReq support
* firmware crashdump using devcoredump
* capture all frames with sniffer
diff --git a/Documentation/Changes b/Documentation/Changes
index 6d88630..f447f05 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -43,7 +43,7 @@
 o  grub                   0.93                    # grub --version || grub-install --version
 o  mcelog                 0.6                     # mcelog --version
 o  iptables               1.4.2                   # iptables -V
-o  openssl & libcrypto    1.0.1k                  # openssl version
+o  openssl & libcrypto    1.0.0                   # openssl version
 
 
 Kernel compilation
diff --git a/Documentation/devicetree/bindings/arm/gic-v3.txt b/Documentation/devicetree/bindings/arm/gic-v3.txt
index ddfade4..7803e77 100644
--- a/Documentation/devicetree/bindings/arm/gic-v3.txt
+++ b/Documentation/devicetree/bindings/arm/gic-v3.txt
@@ -57,6 +57,8 @@
 These nodes must have the following properties:
 - compatible : Should at least contain  "arm,gic-v3-its".
 - msi-controller : Boolean property. Identifies the node as an MSI controller
+- #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device
+  which will generate the MSI.
 - reg: Specifies the base physical address and size of the ITS
   registers.
 
@@ -83,6 +85,7 @@
 		gic-its@2c200000 {
 			compatible = "arm,gic-v3-its";
 			msi-controller;
+			#msi-cells = <1>;
 			reg = <0x0 0x2c200000 0 0x200000>;
 		};
 	};
@@ -107,12 +110,14 @@
 		gic-its@2c200000 {
 			compatible = "arm,gic-v3-its";
 			msi-controller;
+			#msi-cells = <1>;
 			reg = <0x0 0x2c200000 0 0x200000>;
 		};
 
 		gic-its@2c400000 {
 			compatible = "arm,gic-v3-its";
 			msi-controller;
+			#msi-cells = <1>;
 			reg = <0x0 0x2c400000 0 0x200000>;
 		};
 	};
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index a8274ea..b8e41c1 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -497,7 +497,7 @@
 	};
 
 	idle-states {
-		entry-method = "arm,psci";
+		entry-method = "psci";
 
 		CPU_RETENTION_0_0: cpu-retention-0-0 {
 			compatible = "arm,idle-state";
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
index 5788d5c..82d40e2 100644
--- a/Documentation/devicetree/bindings/gpio/gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio.txt
@@ -16,7 +16,9 @@
 GPIO properties should be named "[<name>-]gpios", with <name> being the purpose
 of this GPIO for the device. While a non-existent <name> is considered valid
 for compatibility reasons (resolving to the "gpios" property), it is not allowed
-for new bindings.
+for new bindings. Also, GPIO properties named "[<name>-]gpio" are valid and old
+bindings use them, but they are only supported for compatibility reasons and
+should not be used in new bindings since they have been deprecated.
 
 GPIO properties can contain one or more GPIO phandles, but only in exceptional
 cases should they contain more than one. If your device uses several GPIOs with
diff --git a/Documentation/devicetree/bindings/iio/accel/bma180.txt b/Documentation/devicetree/bindings/iio/accel/bma180.txt
index c593357..4a3679d 100644
--- a/Documentation/devicetree/bindings/iio/accel/bma180.txt
+++ b/Documentation/devicetree/bindings/iio/accel/bma180.txt
@@ -1,10 +1,11 @@
-* Bosch BMA180 triaxial acceleration sensor
+* Bosch BMA180 / BMA250 triaxial acceleration sensor
 
 http://omapworld.com/BMA180_111_1002839.pdf
+http://ae-bst.resource.bosch.com/media/products/dokumente/bma250/bst-bma250-ds002-05.pdf
 
 Required properties:
 
-  - compatible : should be "bosch,bma180"
+  - compatible : should be "bosch,bma180" or "bosch,bma250"
   - reg : the I2C address of the sensor
 
 Optional properties:
@@ -13,6 +14,9 @@
 
   - interrupts : interrupt mapping for GPIO IRQ, it should by configured with
 		flags IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING
+		For the bma250 the first interrupt listed must be the one
+		connected to the INT1 pin, the second (optional) interrupt
+		listed must be the one connected to the INT2 pin.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
new file mode 100644
index 0000000..9d9e930
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
@@ -0,0 +1,36 @@
+* Toradex Colibri VF50 Touchscreen driver
+
+Required Properties:
+- compatible: must be "toradex,vf50-touchscreen"
+- io-channels: adc channels being used by the Colibri VF50 module
+- xp-gpios: FET gate driver for input of X+
+- xm-gpios: FET gate driver for input of X-
+- yp-gpios: FET gate driver for input of Y+
+- ym-gpios: FET gate driver for input of Y-
+- interrupt-parent: phandle for the interrupt controller
+- interrupts: pen irq interrupt for touch detection
+- pinctrl-names: "idle", "default", "gpios"
+- pinctrl-0: pinctrl node for pen/touch detection state pinmux
+- pinctrl-1: pinctrl node for X/Y and pressure measurement (ADC) state pinmux
+- pinctrl-2: pinctrl node for gpios functioning as FET gate drivers
+- vf50-ts-min-pressure: pressure level at which to stop measuring X/Y values
+
+Example:
+
+	touchctrl: vf50_touchctrl {
+		compatible = "toradex,vf50-touchscreen";
+		io-channels = <&adc1 0>,<&adc0 0>,
+				<&adc0 1>,<&adc1 2>;
+		xp-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
+		xm-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+		yp-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
+		ym-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+		interrupt-parent = <&gpio0>;
+		interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+		pinctrl-names = "idle","default","gpios";
+		pinctrl-0 = <&pinctrl_touchctrl_idle>;
+		pinctrl-1 = <&pinctrl_touchctrl_default>;
+		pinctrl-2 = <&pinctrl_touchctrl_gpios>;
+		vf50-ts-min-pressure = <200>;
+		status = "disabled";
+	};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt b/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt
new file mode 100644
index 0000000..853dff9
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt
@@ -0,0 +1,36 @@
+* Freescale i.MX6UL Touch Controller
+
+Required properties:
+- compatible: must be "fsl,imx6ul-tsc".
+- reg: the addresses of this touch controller and of the ADC2.
+- interrupts: the interrupts of this touch controller and of the ADC2.
+- clocks: the root clocks of the touch controller and of the ADC2.
+- clock-names: must be "tsc" and "adc".
+- xnur-gpio: the X- gpio this controller connects to.
+  This xnur-gpio goes low once the finger leaves the touch screen (the
+  last touch event the touch controller captures).
+
+Optional properties:
+- measure-delay-time: the value of the measure delay time.
+  Before an X-axis or Y-axis measurement, the screen needs some time until
+  the potential distribution is even and stable.
+  This value depends on the touch screen.
+- pre-charge-time: the touch screen needs some time to precharge.
+  This value depends on the touch screen.
+
+Example:
+	tsc: tsc@02040000 {
+		compatible = "fsl,imx6ul-tsc";
+		reg = <0x02040000 0x4000>, <0x0219c000 0x4000>;
+		interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&clks IMX6UL_CLK_IPG>,
+			 <&clks IMX6UL_CLK_ADC2>;
+		clock-names = "tsc", "adc";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_tsc>;
+		xnur-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
+		measure-delay-time = <0xfff>;
+		pre-charge-time = <0xffff>;
+		status = "okay";
+	};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
index 391717a..ec96b1f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
@@ -4,8 +4,8 @@
 interrupt.
 
 Required Properties:
-- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc"
-  as fallback
+- compatible: has to be "qca,<soctype>-misc-intc", "qca,ar7100-misc-intc" or
+  "qca,<soctype>-misc-intc", "qca,ar7240-misc-intc"
 - reg: Base address and size of the controllers memory area
 - interrupt-parent: phandle of the parent interrupt controller.
 - interrupts: Interrupt specifier for the controllers interrupt.
@@ -13,6 +13,9 @@
 - #interrupt-cells : Specifies the number of cells needed to encode interrupt
 		     source, should be 1
 
+Compatible fallback depends on the SoC. Use ar7100 for ar71xx and ar913x,
+use ar7240 for all other SoCs.
+
 Please refer to interrupts.txt in this directory for details of the common
 Interrupt Controllers bindings used by client devices.
 
@@ -28,3 +31,16 @@
 		interrupt-controller;
 		#interrupt-cells = <1>;
 	};
+
+Another example:
+
+	interrupt-controller@18060010 {
+		compatible = "qca,ar9331-misc-intc", qca,ar7240-misc-intc";
+		reg = <0x18060010 0x4>;
+
+		interrupt-parent = <&cpuintc>;
+		interrupts = <6>;
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/net/brcm,iproc-mdio.txt b/Documentation/devicetree/bindings/net/brcm,iproc-mdio.txt
new file mode 100644
index 0000000..8ba9ed1
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/brcm,iproc-mdio.txt
@@ -0,0 +1,23 @@
+* Broadcom iProc MDIO bus controller
+
+Required properties:
+- compatible: should be "brcm,iproc-mdio"
+- reg: address and length of the register set for the MDIO interface
+- #address-cells: must be 1
+- #size-cells: must be 0
+
+Child nodes of this MDIO bus controller node are standard Ethernet PHY device
+nodes as described in Documentation/devicetree/bindings/net/phy.txt
+
+Example:
+
+mdio@18002000 {
+	compatible = "brcm,iproc-mdio";
+	reg = <0x18002000 0x8>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	enet-gphy@0 {
+		reg = <0>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/can/sun4i_can.txt b/Documentation/devicetree/bindings/net/can/sun4i_can.txt
new file mode 100644
index 0000000..84ed190
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/sun4i_can.txt
@@ -0,0 +1,36 @@
+Allwinner A10/A20 CAN controller Device Tree Bindings
+-----------------------------------------------------
+
+Required properties:
+- compatible: "allwinner,sun4i-a10-can"
+- reg: physical base address and size of the Allwinner A10/A20 CAN register map.
+- interrupts: interrupt specifier for the sole interrupt.
+- clocks: phandle and clock specifier.
+
+Example
+-------
+
+SoC common .dtsi file:
+
+	can0_pins_a: can0@0 {
+		allwinner,pins = "PH20","PH21";
+		allwinner,function = "can";
+		allwinner,drive = <0>;
+		allwinner,pull = <0>;
+	};
+...
+	can0: can@01c2bc00 {
+		compatible = "allwinner,sun4i-a10-can";
+		reg = <0x01c2bc00 0x400>;
+		interrupts = <0 26 4>;
+		clocks = <&apb1_gates 4>;
+		status = "disabled";
+	};
+
+Board specific .dts file:
+
+	can0: can@01c2bc00 {
+		pinctrl-names = "default";
+		pinctrl-0 = <&can0_pins_a>;
+		status = "okay";
+	};
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index 1e97532..db74f0d 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -57,6 +57,10 @@
     "rgmii-id", as all other connection types are detected by hardware.
   - fsl,magic-packet : If present, indicates that the hardware supports
     waking up via magic packet.
+  - fsl,wake-on-filer : If present, indicates that the hardware supports
+    waking up by Filer General Purpose Interrupt (FGPI) asserted on the
+    Rx int line.  This is an advanced power management capability allowing
+    certain user-defined packet types (matched by filer rules) to wake up the system.
   - bd-stash : If present, indicates that the hardware supports stashing
     buffer descriptors in the L2.
   - rx-stash-len : Denotes the number of bytes of a received buffer to stash
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt b/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt
index 988fc69..d1df8a0 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt
@@ -32,13 +32,13 @@
 
 Required properties:
 
-- compatible: should be "hisilicon,hip04-mdio".
+- compatible: should be "hisilicon,mdio".
 - Inherits from MDIO bus node binding [2]
 [2] Documentation/devicetree/bindings/net/phy.txt
 
 Example:
 	mdio {
-		compatible = "hisilicon,hip04-mdio";
+		compatible = "hisilicon,mdio";
 		reg = <0x28f1000 0x1000>;
 		#address-cells = <1>;
 		#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
new file mode 100644
index 0000000..80411b2
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
@@ -0,0 +1,49 @@
+Hisilicon DSA Fabric device controller
+
+Required properties:
+- compatible: should be "hisilicon,hns-dsaf-v1" or "hisilicon,hns-dsaf-v2".
+  "hisilicon,hns-dsaf-v1" is for hip05.
+  "hisilicon,hns-dsaf-v2" is for Hi1610 and Hi1612.
+- dsa-name: the name of the dsa fabric that provides this interface.
+  Should be "dsafX", where X is the dsaf id.
+- mode: dsa fabric mode string. only support one of dsaf modes like these:
+		"2port-64vf",
+		"6port-16rss",
+		"6port-16vf".
+- interrupt-parent: the interrupt parent of this device.
+- interrupts: should contain the DSA Fabric and rcb interrupt.
+- reg: specifies base physical address(es) and size of the device registers.
+  The first region is external interface control register base and size.
+  The second region is SerDes base register and size.
+  The third region is the PPE register base and size.
+  The fourth region is dsa fabric base register and size.
+  The fifth region is the cpld base register and size; it is not required if no cpld is used.
+- phy-handle: phy handle of the physical port, 0 if there is no phy device. See ethernet.txt [1].
+- buf-size: rx buffer size, should be 16-1024.
+- desc-num: number of descriptors in the TX and RX queues, should be 512, 1024, 2048 or 4096.
+
+[1] Documentation/devicetree/bindings/net/phy.txt
+
+Example:
+
+dsa: dsa@c7000000 {
+	compatible = "hisilicon,hns-dsaf-v1";
+	dsa-name = "dsaf0";
+	mode = "6port-16rss";
+	interrupt-parent = <&mbigen_dsa>;
+	reg = <0x0 0xC0000000 0x0 0x420000
+	       0x0 0xC2000000 0x0 0x300000
+	       0x0 0xc5000000 0x0 0x890000
+	       0x0 0xc7000000 0x0 0x60000>;
+	phy-handle = <0 0 0 0 &soc0_phy4 &soc0_phy5 0 0>;
+	interrupts = <131 4>,<132 4>, <133 4>,<134 4>,
+		     <135 4>,<136 4>, <137 4>,<138 4>,
+		     <139 4>,<140 4>, <141 4>,<142 4>,
+		     <143 4>,<144 4>, <145 4>,<146 4>,
+		     <147 4>,<148 4>, <384 1>,<385 1>,
+		     <386 1>,<387 1>, <388 1>,<389 1>,
+		     <390 1>,<391 1>;
+	buf-size = <4096>;
+	desc-num = <1024>;
+	dma-coherent;
+};
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt
new file mode 100644
index 0000000..9940aa0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt
@@ -0,0 +1,22 @@
+Hisilicon MDIO bus controller
+
+Properties:
+- compatible: "hisilicon,mdio","hisilicon,hns-mdio".
+- reg: The base address of the MDIO bus controller register bank.
+- #address-cells: Must be <1>.
+- #size-cells: Must be <0>.  MDIO addresses have no size component.
+
+Typically an MDIO bus might have several children.
+
+Example:
+         mdio@803c0000 {
+                   #address-cells = <1>;
+                   #size-cells = <0>;
+                   compatible = "hisilicon,mdio","hisilicon,hns-mdio";
+                   reg = <0x0 0x803c0000 0x0 0x10000>;
+
+                   ethernet-phy@0 {
+                            ...
+                            reg = <0>;
+                   };
+         };
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt
new file mode 100644
index 0000000..41d19be
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt
@@ -0,0 +1,47 @@
+Hisilicon Network Subsystem NIC controller
+
+Required properties:
+- compatible: "hisilicon,hns-nic-v1" or "hisilicon,hns-nic-v2".
+  "hisilicon,hns-nic-v1" is for hip05.
+  "hisilicon,hns-nic-v2" is for Hi1610 and Hi1612.
+- ae-name: the name of the accelerator that provides this interface;
+  it is simply a name referring to the name given in the accelerator node.
+- port-id: the index of the port provided by DSAF (the accelerator). DSAF can
+  connect to 8 PHYs. Ports 0 and 1 are both used for administration purposes. They
+  are called debug ports.
+
+  The remaining 6 PHYs are taken according to the mode of DSAF.
+
+  In NIC mode of DSAF, all 6 PHYs are taken as ethernet ports to the CPU. The
+  port-id can be 2 to 7. Here is the diagram:
+            +-----+---------------+
+            |            CPU      |
+            +-+-+-+---+-+-+-+-+-+-+
+              | |     | | | | | |
+             debug       service
+             port         port
+             (0,1)       (2-7)
+
+  In Switch mode of DSAF, all 6 PHYs are taken as physical ports connected to a
+  LAN switch, while the CPU side assumes it has one single NIC connected to
+  this switch. In this case, the port-id will be 2 only.
+            +-----+---------------+
+            |            CPU      |
+            +-+-+-+---+-+-+-+-+-+-+
+              | |   service| port(2)
+             debug   +------------+
+             port    |   switch   |
+             (0,1)   +-+-+-+-+-+-++
+                       | | | | | |
+                      external port
+
+- local-mac-address: mac addr of the ethernet interface
+
+Example:
+
+	ethernet@0{
+		compatible = "hisilicon,hns-nic-v1";
+		ae-name = "dsaf0";
+		port-id = <0>;
+		local-mac-address = [a2 14 e4 4b 56 76];
+	};
diff --git a/Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt b/Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt
new file mode 100644
index 0000000..a4ed2ef
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt
@@ -0,0 +1,20 @@
+* MRF24J40 IEEE 802.15.4 *
+
+Required properties:
+  - compatible:		should be "microchip,mrf24j40", "microchip,mrf24j40ma",
+			or "microchip,mrf24j40mc" depends on your transceiver
+			board
+  - spi-max-frequency:	maximal bus speed, should be set something under or equal
+			10000000
+  - reg:		the chipselect index
+  - interrupts:		the interrupt generated by the device.
+
+Example:
+
+	mrf24j40ma@0 {
+		compatible = "microchip,mrf24j40ma";
+		spi-max-frequency = <8500000>;
+		reg = <0>;
+		interrupts = <19 8>;
+		interrupt-parent = <&gpio3>;
+	};
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index 1fd8831..b486f3f 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -6,8 +6,12 @@
 Required properties:
 - compatible: "renesas,etheravb-r8a7790" if the device is a part of R8A7790 SoC.
 	      "renesas,etheravb-r8a7794" if the device is a part of R8A7794 SoC.
+	      "renesas,etheravb-r8a7795" if the device is a part of R8A7795 SoC.
 - reg: offset and length of (1) the register block and (2) the stream buffer.
-- interrupts: interrupt specifier for the sole interrupt.
+- interrupts: A list of interrupt-specifiers, one for each entry in
+	      interrupt-names.
+	      If interrupt-names is not present, an interrupt specifier
+	      for a single muxed interrupt.
 - phy-mode: see ethernet.txt file in the same directory.
 - phy-handle: see ethernet.txt file in the same directory.
 - #address-cells: number of address cells for the MDIO bus, must be equal to 1.
@@ -18,6 +22,12 @@
 Optional properties:
 - interrupt-parent: the phandle for the interrupt controller that services
 		    interrupts for this device.
+- interrupt-names: A list of interrupt names.
+		   For the R8A7795 SoC this property is mandatory;
+		   it should include one entry per channel, named "ch%u",
+		   where %u is the channel number ranging from 0 to 24.
+		   For other SoCs this property is optional; if present
+		   it should contain "mux" for a single muxed interrupt.
 - pinctrl-names: pin configuration state name ("default").
 - renesas,no-ether-link: boolean, specify when a board does not provide a proper
 			 AVB_LINK signal.
@@ -27,13 +37,46 @@
 Example:
 
 	ethernet@e6800000 {
-		compatible = "renesas,etheravb-r8a7790";
-		reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
+		compatible = "renesas,etheravb-r8a7795";
+		reg = <0 0xe6800000 0 0x800>, <0 0xe6a00000 0 0x10000>;
 		interrupt-parent = <&gic>;
-		interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&mstp8_clks R8A7790_CLK_ETHERAVB>;
-		phy-mode = "rmii";
+		interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "ch0", "ch1", "ch2", "ch3",
+				  "ch4", "ch5", "ch6", "ch7",
+				  "ch8", "ch9", "ch10", "ch11",
+				  "ch12", "ch13", "ch14", "ch15",
+				  "ch16", "ch17", "ch18", "ch19",
+				  "ch20", "ch21", "ch22", "ch23",
+				  "ch24";
+		clocks = <&mstp8_clks R8A7795_CLK_ETHERAVB>;
+		power-domains = <&cpg_clocks>;
+		phy-mode = "rgmii-id";
 		phy-handle = <&phy0>;
+
 		pinctrl-0 = <&ether_pins>;
 		pinctrl-names = "default";
 		renesas,no-ether-link;
@@ -41,8 +84,20 @@
 		#size-cells = <0>;
 
 		phy0: ethernet-phy@0 {
+			rxc-skew-ps = <900>;
+			rxdv-skew-ps = <0>;
+			rxd0-skew-ps = <0>;
+			rxd1-skew-ps = <0>;
+			rxd2-skew-ps = <0>;
+			rxd3-skew-ps = <0>;
+			txc-skew-ps = <900>;
+			txen-skew-ps = <0>;
+			txd0-skew-ps = <0>;
+			txd1-skew-ps = <0>;
+			txd2-skew-ps = <0>;
+			txd3-skew-ps = <0>;
 			reg = <0>;
 			interrupt-parent = <&gpio2>;
-			interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+			interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
 		};
 	};
diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
index d8ef5bf..7fab84b 100644
--- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
+++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
@@ -7,7 +7,8 @@
 
 Required properties:
 - compatible: "renesas,pci-r8a7790" for the R8A7790 SoC;
-	      "renesas,pci-r8a7791" for the R8A7791 SoC.
+	      "renesas,pci-r8a7791" for the R8A7791 SoC;
+	      "renesas,pci-r8a7794" for the R8A7794 SoC.
 - reg:	A list of physical regions to access the device: the first is
 	the operational registers for the OHCI/EHCI controllers and the
 	second is for the bridge configuration and control registers.
diff --git a/Documentation/devicetree/bindings/regulator/pbias-regulator.txt b/Documentation/devicetree/bindings/regulator/pbias-regulator.txt
index 32aa26f..acbcb45 100644
--- a/Documentation/devicetree/bindings/regulator/pbias-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/pbias-regulator.txt
@@ -2,7 +2,12 @@
 
 Required properties:
 - compatible:
-  - "ti,pbias-omap" for OMAP2, OMAP3, OMAP4, OMAP5, DRA7.
+  - should be "ti,pbias-dra7" for DRA7
+  - should be "ti,pbias-omap2" for OMAP2
+  - should be "ti,pbias-omap3" for OMAP3
+  - should be "ti,pbias-omap4" for OMAP4
+  - should be "ti,pbias-omap5" for OMAP5
+  - "ti,pbias-omap" is deprecated
 - reg: pbias register offset from syscon base and size of pbias register.
 - syscon : phandle of the system control module
 - regulator-name : should be
diff --git a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
index dcefc43..6160ffb 100644
--- a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
@@ -15,17 +15,18 @@
 - interrupts: Should contain spi interrupt
 
 - clocks: phandles to input clocks.
-  The first should be <&topckgen CLK_TOP_SPI_SEL>.
-  The second should be one of the following.
+  The first should be one of the following. It is the PLL clock source.
    -  <&clk26m>: specify parent clock 26MHZ.
    -  <&topckgen CLK_TOP_SYSPLL3_D2>: specify parent clock 109MHZ.
 				      It's the default one.
    -  <&topckgen CLK_TOP_SYSPLL4_D2>: specify parent clock 78MHZ.
    -  <&topckgen CLK_TOP_UNIVPLL2_D4>: specify parent clock 104MHZ.
    -  <&topckgen CLK_TOP_UNIVPLL1_D8>: specify parent clock 78MHZ.
+  The second should be <&topckgen CLK_TOP_SPI_SEL>. It is the clock mux.
+  The third is <&pericfg CLK_PERI_SPI0>. It is the clock gate.
 
-- clock-names: shall be "spi-clk" for the controller clock, and
-  "parent-clk" for the parent clock.
+- clock-names: shall be "parent-clk" for the parent clock, "sel-clk" for the
+  mux clock, and "spi-clk" for the clock gate.
 
 Optional properties:
 - mediatek,pad-select: specify which pins group(ck/mi/mo/cs) spi
@@ -44,8 +45,11 @@
 	#size-cells = <0>;
 	reg = <0 0x1100a000 0 0x1000>;
 	interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>;
-	clocks = <&topckgen CLK_TOP_SPI_SEL>, <&topckgen CLK_TOP_SYSPLL3_D2>;
-	clock-names = "spi-clk", "parent-clk";
+	clocks = <&topckgen CLK_TOP_SYSPLL3_D2>,
+		 <&topckgen CLK_TOP_SPI_SEL>,
+		 <&pericfg CLK_PERI_SPI0>;
+	clock-names = "parent-clk", "sel-clk", "spi-clk";
+
 	mediatek,pad-select = <0>;
 	status = "disabled";
 };
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index 8a49362..41b817f 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -55,19 +55,11 @@
 the different fan speeds possible. Cooling states are referred to by
 single unsigned integers, where larger numbers mean greater heat
 dissipation. The precise set of cooling states associated with a device
-(as referred to be the cooling-min-state and cooling-max-state
+(as referred to by the cooling-min-level and cooling-max-level
 properties) should be defined in a particular device's binding.
 For more examples of cooling devices, refer to the example sections below.
 
 Required properties:
-- cooling-min-state:	An integer indicating the smallest
-  Type: unsigned	cooling state accepted. Typically 0.
-  Size: one cell
-
-- cooling-max-state:	An integer indicating the largest
-  Type: unsigned	cooling state accepted.
-  Size: one cell
-
 - #cooling-cells:	Used to provide cooling device specific information
   Type: unsigned	while referring to it. Must be at least 2, in order
   Size: one cell      	to specify minimum and maximum cooling state used
@@ -77,6 +69,15 @@
 			See Cooling device maps section below for more details
 			on how consumers refer to cooling devices.
 
+Optional properties:
+- cooling-min-level:	An integer indicating the smallest
+  Type: unsigned	cooling state accepted. Typically 0.
+  Size: one cell
+
+- cooling-max-level:	An integer indicating the largest
+  Type: unsigned	cooling state accepted.
+  Size: one cell
+
 * Trip points
 
 The trip node is a node to describe a point in the temperature domain
@@ -225,8 +226,8 @@
 			396000  950000
 			198000  850000
 		>;
-		cooling-min-state = <0>;
-		cooling-max-state = <3>;
+		cooling-min-level = <0>;
+		cooling-max-level = <3>;
 		#cooling-cells = <2>; /* min followed by max */
 	};
 	...
@@ -240,8 +241,8 @@
 	 */
 	fan0: fan@0x48 {
 		...
-		cooling-min-state = <0>;
-		cooling-max-state = <9>;
+		cooling-min-level = <0>;
+		cooling-max-level = <9>;
 		#cooling-cells = <2>; /* min followed by max */
 	};
 };
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
index d71ef07..a057b75 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
@@ -6,6 +6,7 @@
 	"lsi,zevio-usb"
 	"qcom,ci-hdrc"
 	"chipidea,usb2"
+	"xlnx,zynq-usb-2.20a"
 - reg: base address and length of the registers
 - interrupts: interrupt for the USB controller
 
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index ac5f0c3..82d2ac9 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -203,6 +203,7 @@
 skyworks	Skyworks Solutions, Inc.
 smsc	Standard Microsystems Corporation
 snps	Synopsys, Inc.
+socionext	Socionext Inc.
 solidrun	SolidRun
 solomon        Solomon Systech Limited
 sony	Sony Corporation
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt
new file mode 100644
index 0000000..f7cc7c0
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt
@@ -0,0 +1,35 @@
+* Atmel SAMA5D4 Watchdog Timer (WDT) Controller
+
+Required properties:
+- compatible: "atmel,sama5d4-wdt"
+- reg: base physical address and length of memory mapped region.
+
+Optional properties:
+- timeout-sec: watchdog timeout value (in seconds).
+- interrupts: interrupt number to the CPU.
+- atmel,watchdog-type: should be "hardware" or "software".
+	"hardware": enable watchdog fault reset. A watchdog fault triggers
+		    watchdog reset.
+	"software": enable watchdog fault interrupt. A watchdog fault asserts
+		    watchdog interrupt.
+- atmel,idle-halt: present if you want to stop the watchdog when the CPU is
+		   in idle state.
+	CAUTION: This property should be used with care: it actually stops the
+	watchdog from counting when the CPU is in idle state, therefore the
+	watchdog reset time depends on mean CPU usage, and the watchdog will not
+	reset at all if the CPU stops working while in idle state, which is
+	probably not what you want.
+- atmel,dbg-halt: present if you want to stop the watchdog when the CPU is
+		  in debug state.
+
+Example:
+	watchdog@fc068640 {
+		compatible = "atmel,sama5d4-wdt";
+		reg = <0xfc068640 0x10>;
+		interrupts = <4 IRQ_TYPE_LEVEL_HIGH 5>;
+		timeout-sec = <10>;
+		atmel,watchdog-type = "hardware";
+		atmel,dbg-halt;
+		atmel,idle-halt;
+		status = "okay";
+	};
diff --git a/Documentation/devicetree/bindings/watchdog/lpc18xx-wdt.txt b/Documentation/devicetree/bindings/watchdog/lpc18xx-wdt.txt
new file mode 100644
index 0000000..09f6b24
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/lpc18xx-wdt.txt
@@ -0,0 +1,19 @@
+* NXP LPC18xx Watchdog Timer (WDT)
+
+Required properties:
+- compatible: Should be "nxp,lpc1850-wwdt"
+- reg: Should contain WDT registers location and length
+- clocks: Must contain an entry for each entry in clock-names.
+- clock-names: Should contain "wdtclk" and "reg"; the watchdog counter
+               clock and register interface clock respectively.
+- interrupts: Should contain WDT interrupt
+
+Examples:
+
+watchdog@40080000 {
+	compatible = "nxp,lpc1850-wwdt";
+	reg = <0x40080000 0x24>;
+	clocks = <&cgu BASE_SAFE_CLK>, <&ccu1 CLK_CPU_WWDT>;
+	clock-names = "wdtclk", "reg";
+	interrupts = <49>;
+};
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
index b80606d..f59c43b 100644
--- a/Documentation/gpio/board.txt
+++ b/Documentation/gpio/board.txt
@@ -21,8 +21,8 @@
 device tree bindings for your controller.
 
 GPIOs mappings are defined in the consumer device's node, in a property named
-<function>-gpios, where <function> is the function the driver will request
-through gpiod_get(). For example:
+either <function>-gpios or <function>-gpio, where <function> is the function
+the driver will request through gpiod_get(). For example:
 
 	foo_device {
 		compatible = "acme,foo";
@@ -31,7 +31,7 @@
 			    <&gpio 16 GPIO_ACTIVE_HIGH>, /* green */
 			    <&gpio 17 GPIO_ACTIVE_HIGH>; /* blue */
 
-		power-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
+		power-gpio = <&gpio 1 GPIO_ACTIVE_LOW>;
 	};
 
 This property will make GPIOs 15, 16 and 17 available to the driver under the
@@ -39,15 +39,24 @@
 
 	struct gpio_desc *red, *green, *blue, *power;
 
-	red = gpiod_get_index(dev, "led", 0);
-	green = gpiod_get_index(dev, "led", 1);
-	blue = gpiod_get_index(dev, "led", 2);
+	red = gpiod_get_index(dev, "led", 0, GPIOD_OUT_HIGH);
+	green = gpiod_get_index(dev, "led", 1, GPIOD_OUT_HIGH);
+	blue = gpiod_get_index(dev, "led", 2, GPIOD_OUT_HIGH);
 
-	power = gpiod_get(dev, "power");
+	power = gpiod_get(dev, "power", GPIOD_OUT_HIGH);
 
 The led GPIOs will be active-high, while the power GPIO will be active-low (i.e.
 gpiod_is_active_low(power) will be true).
 
+The second parameter of the gpiod_get() functions, the con_id string, has to be
+the <function> prefix of the GPIO suffixes ("gpios" or "gpio", automatically
+looked up by the gpiod functions internally) used in the device tree. With the
+above "led-gpios" example, use the prefix without the "-" as the con_id
+parameter: "led".
+
+Internally, the GPIO subsystem prefixes the GPIO suffix ("gpios" or "gpio")
+with the string passed in con_id to get the resulting property name
+(snprintf(..., "%s-%s", con_id, gpio_suffixes[])).
+
 ACPI
 ----
 ACPI also supports function names for GPIOs in a similar fashion to DT.
@@ -142,13 +151,14 @@
 
 	struct gpio_desc *red, *green, *blue, *power;
 
-	red = gpiod_get_index(dev, "led", 0);
-	green = gpiod_get_index(dev, "led", 1);
-	blue = gpiod_get_index(dev, "led", 2);
+	red = gpiod_get_index(dev, "led", 0, GPIOD_OUT_HIGH);
+	green = gpiod_get_index(dev, "led", 1, GPIOD_OUT_HIGH);
+	blue = gpiod_get_index(dev, "led", 2, GPIOD_OUT_HIGH);
 
-	power = gpiod_get(dev, "power");
-	gpiod_direction_output(power, 1);
+	power = gpiod_get(dev, "power", GPIOD_OUT_HIGH);
 
-Since the "power" GPIO is mapped as active-low, its actual signal will be 0
-after this code. Contrary to the legacy integer GPIO interface, the active-low
-property is handled during mapping and is thus transparent to GPIO consumers.
+Since the "led" GPIOs are mapped as active-high, this example will switch their
+signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped
+as active-low, its actual signal will be 0 after this code. Contrary to the legacy
+integer GPIO interface, the active-low property is handled during mapping and is
+thus transparent to GPIO consumers.
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
index a206639..e000502 100644
--- a/Documentation/gpio/consumer.txt
+++ b/Documentation/gpio/consumer.txt
@@ -39,6 +39,9 @@
 					  const char *con_id, unsigned int idx,
 					  enum gpiod_flags flags)
 
+For a more detailed description of the con_id parameter in the DeviceTree case,
+see Documentation/gpio/board.txt
+
 The flags parameter is used to optionally specify a direction and initial value
 for the GPIO. Values can be:
 
diff --git a/Documentation/hwmon/nct6775 b/Documentation/hwmon/nct6775
index f0dd3d2..76add4c 100644
--- a/Documentation/hwmon/nct6775
+++ b/Documentation/hwmon/nct6775
@@ -32,6 +32,10 @@
     Prefix: 'nct6792'
     Addresses scanned: ISA address retrieved from Super I/O registers
     Datasheet: Available from Nuvoton upon request
+  * Nuvoton NCT6793D
+    Prefix: 'nct6793'
+    Addresses scanned: ISA address retrieved from Super I/O registers
+    Datasheet: Available from Nuvoton upon request
 
 Authors:
         Guenter Roeck <linux@roeck-us.net>
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index 1700756..aa69ccc 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -7,11 +7,11 @@
 The IEEE 802.15.4 working group focuses on standardization of bottom
 two layers: Medium Access Control (MAC) and Physical (PHY). And there
 are mainly two options available for upper layers:
- - ZigBee - proprietary protocol from ZigBee Alliance
- - 6LowPAN - IPv6 networking over low rate personal area networks
+ - ZigBee - proprietary protocol from the ZigBee Alliance
+ - 6LoWPAN - IPv6 networking over low rate personal area networks
 
-The Linux-ZigBee project goal is to provide complete implementation
-of IEEE 802.15.4 and 6LoWPAN protocols. IEEE 802.15.4 is a stack
+The linux-wpan project goal is to provide a complete implementation
+of the IEEE 802.15.4 and 6LoWPAN protocols. IEEE 802.15.4 is a stack
 of protocols for organizing Low-Rate Wireless Personal Area Networks.
 
 The stack is composed of three main parts:
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ebe94f2..9983825 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -884,8 +884,8 @@
 
 icmp_errors_use_inbound_ifaddr - BOOLEAN
 
-	If zero, icmp error messages are sent with the primary address of
-	the exiting interface.
+	If zero, icmp error messages except redirects are sent with the primary
+	address of the exiting interface.
 
 	If non-zero, the message will be sent with the primary address of
 	the interface that received the packet that caused the icmp error.
@@ -897,8 +897,23 @@
 	then the primary address of the first non-loopback interface that
 	has one will be used regardless of this setting.
 
+	The source address selection of icmp redirect messages is controlled by
+	icmp_redirects_use_orig_daddr.
 	Default: 0
 
+icmp_redirects_use_orig_daddr - BOOLEAN
+
+	If zero, icmp redirect messages are sent using the address specified for
+	other icmp errors by icmp_errors_use_inbound_ifaddr.
+
+	If non-zero, the message will be sent with the destination address of
+	the packet that caused the icmp redirect.
+	This behaviour is the preferred one on VRRP routers (see RFC 5798
+	section 8.1.1).
+
+	Default: 0
+
+
 igmp_max_memberships - INTEGER
 	Change the maximum number of multicast groups we can subscribe to.
 	Default: 20
diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt
index 3ba7095..e6b1c02 100644
--- a/Documentation/networking/ipvs-sysctl.txt
+++ b/Documentation/networking/ipvs-sysctl.txt
@@ -157,6 +157,16 @@
 	persistence template if it is to be used to schedule a new
 	connection and the destination server is quiescent.
 
+ignore_tunneled - BOOLEAN
+	0 - disabled (default)
+	not 0 - enabled
+
+	If set, ipvs will set the ipvs_property on all packets which are of
+	unrecognized protocols.  This prevents us from routing tunneled
+	protocols like ipip, which is useful to prevent rescheduling
+	packets that have been tunneled to the ipvs host (i.e. to prevent
+	ipvs routing loops when ipvs is also acting as a real server).
+
 nat_icmp_send - BOOLEAN
         0 - disabled (default)
         not 0 - enabled
diff --git a/Documentation/networking/l2tp.txt b/Documentation/networking/l2tp.txt
index c74434d..4650a00 100644
--- a/Documentation/networking/l2tp.txt
+++ b/Documentation/networking/l2tp.txt
@@ -213,15 +213,12 @@
 and peer 192.168.1.2, using IP addresses 10.5.1.1 and 10.5.1.2 for the
 tunnel endpoints:-
 
-# modprobe l2tp_eth
-# modprobe l2tp_netlink
-
 # ip l2tp add tunnel tunnel_id 1 peer_tunnel_id 1 udp_sport 5000 \
   udp_dport 5000 encap udp local 192.168.1.1 remote 192.168.1.2
 # ip l2tp add session tunnel_id 1 session_id 1 peer_session_id 1
-# ifconfig -a
+# ip -s -d link show dev l2tpeth0
 # ip addr add 10.5.1.2/32 peer 10.5.1.1/32 dev l2tpeth0
-# ifconfig l2tpeth0 up
+# ip li set dev l2tpeth0 up
 
 Choose IP addresses to be the address of a local IP interface and that
 of the remote system. The IP addresses of the l2tpeth0 interface can be
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 476df04..0714fe5 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -115,7 +115,7 @@
 ^^^^^^^^^
 
 The switchdev driver must implement the switchdev op switchdev_port_attr_get
-for SWITCHDEV_ATTR_PORT_PARENT_ID for each port netdev, returning the same
+for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same
 physical ID for each port of a switch.  The ID must be unique between switches
 on the same system.  The ID does not need to be unique between switches on
 different systems.
@@ -178,7 +178,7 @@
 	bridge fdb add ADDR dev DEV [vlan VID] [self]
 
 The driver should use the helper switchdev_port_fdb_xxx ops for ndo_fdb_xxx
-ops, and handle add/delete/dump of SWITCHDEV_OBJ_PORT_FDB object using
+ops, and handle add/delete/dump of SWITCHDEV_OBJ_ID_PORT_FDB object using
 switchdev_port_obj_xxx ops.
 
 XXX: what should be done if offloading this rule to hardware fails (for
@@ -233,26 +233,27 @@
 device port and on the bridge port, and disable learning_sync.
 
 To support learning and learning_sync port attributes, the driver implements
-switchdev op switchdev_port_attr_get/set for SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS.
-The driver should initialize the attributes to the hardware defaults.
+switchdev op switchdev_port_attr_get/set for
+SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS. The driver should initialize the attributes
+to the hardware defaults.
 
 FDB Ageing
 ^^^^^^^^^^
 
-There are two FDB ageing models supported: 1) ageing by the device, and 2)
-ageing by the kernel.  Ageing by the device is preferred if many FDB entries
-are supported.  The driver calls call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
-...) to age out the FDB entry.  In this model, ageing by the kernel should be
-turned off.  XXX: how to turn off ageing in kernel on a per-port basis or
-otherwise prevent the kernel from ageing out the FDB entry?
+The bridge will skip ageing FDB entries marked with NTF_EXT_LEARNED and it is
+the responsibility of the port driver/device to age out these entries.  If the
+port device supports ageing, when the FDB entry expires, it will notify the
+driver which in turn will notify the bridge with SWITCHDEV_FDB_DEL.  If the
+device does not support ageing, the driver can simulate ageing using a
+garbage collection timer to monitor FDB entries.  Expired entries will be
+notified to the bridge using SWITCHDEV_FDB_DEL.  See the rocker driver for an
+example of a driver running an ageing timer.
 
-In the kernel ageing model, the standard bridge ageing mechanism is used to age
-out stale FDB entries.  To keep an FDB entry "alive", the driver should refresh
-the FDB entry by calling call_switchdev_notifiers(SWITCHDEV_FDB_ADD, ...).  The
+To keep an NTF_EXT_LEARNED entry "alive", the driver should refresh the FDB
+entry by calling call_switchdev_notifiers(SWITCHDEV_FDB_ADD, ...).  The
 notification will reset the FDB entry's last-used time to now.  The driver
 should rate limit refresh notifications, for example, no more than once a
-second.  If the FDB entry expires, fdb_delete is called to remove entry from
-the device.
+second.  (The last-used time is visible using the bridge -s fdb option).
 
 STP State Change on Port
 ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -260,7 +261,7 @@
 Internally or with a third-party STP protocol implementation (e.g. mstpd), the
 bridge driver maintains the STP state for ports, and will notify the switch
 driver of STP state change on a port using the switchdev op
-switchdev_attr_port_set for SWITCHDEV_ATTR_PORT_STP_UPDATE.
+switchdev_port_attr_set for SWITCHDEV_ATTR_ID_PORT_STP_UPDATE.
 
 State is one of BR_STATE_*.  The switch driver can use STP state updates to
 update ingress packet filter list for the port.  For example, if port is
@@ -316,9 +317,9 @@
 switchdev_port_obj_add is used for both adding a new FIB entry to the device,
 or modifying an existing entry on the device.
 
-XXX: Currently, only SWITCHDEV_OBJ_IPV4_FIB objects are supported.
+XXX: Currently, only SWITCHDEV_OBJ_ID_IPV4_FIB objects are supported.
 
-SWITCHDEV_OBJ_IPV4_FIB object passes:
+SWITCHDEV_OBJ_ID_IPV4_FIB object passes:
 
 	struct switchdev_obj_ipv4_fib {         /* IPV4_FIB */
 		u32 dst;
@@ -369,3 +370,22 @@
 NETEVENT_NEIGH_UPDATE.  The device can be programmed with resolved nexthops
 for the routes as arp_tbl updates.  The driver implements ndo_neigh_destroy
 to know when arp_tbl neighbor entries are purged from the port.
+
+Transaction item queue
+^^^^^^^^^^^^^^^^^^^^^^
+
+For the switchdev ops attr_set and obj_add, a 2 phase transaction model is
+used. The first phase is to "prepare" anything needed, including various
+checks and memory allocation. The goal is to handle here the operations that
+might fail. The second phase is to "commit" the actual changes.
+
+Switchdev provides an infrastructure for sharing items (for example memory
+allocations) between the two phases.
+
+The object is created by a driver in the "prepare" phase and queued up by:
+switchdev_trans_item_enqueue()
+During the "commit" phase, the driver gets the object by:
+switchdev_trans_item_dequeue()
+
+If a transaction is aborted during "prepare" phase, switchdev code will handle
+cleanup of the queued-up objects.
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt
new file mode 100644
index 0000000..031ef4a
--- /dev/null
+++ b/Documentation/networking/vrf.txt
@@ -0,0 +1,96 @@
+Virtual Routing and Forwarding (VRF)
+====================================
+The VRF device combined with ip rules provides the ability to create virtual
+routing and forwarding domains (aka VRFs, VRF-lite to be specific) in the
+Linux network stack. One use case is the multi-tenancy problem where each
+tenant has their own unique routing tables and at the very least needs
+different default gateways.
+
+Processes can be "VRF aware" by binding a socket to the VRF device. Packets
+through the socket then use the routing table associated with the VRF
+device. An important feature of the VRF device implementation is that it
+impacts only Layer 3 and above so L2 tools (e.g., LLDP) are not affected
+(ie., they do not need to be run in each VRF). The design also allows
+the use of higher priority ip rules (Policy Based Routing, PBR) to take
+precedence over the VRF device rules directing specific traffic as desired.
+
+In addition, VRF devices allow VRFs to be nested within namespaces. For
+example network namespaces provide separation of network interfaces at L1
+(Layer 1 separation), VLANs on the interfaces within a namespace provide
+L2 separation and then VRF devices provide L3 separation.
+
+Design
+------
+A VRF device is created with an associated route table. Network interfaces
+are then enslaved to a VRF device:
+
+         +-----------------------------+
+         |           vrf-blue          |  ===> route table 10
+         +-----------------------------+
+            |        |            |
+         +------+ +------+     +-------------+
+         | eth1 | | eth2 | ... |    bond1    |
+         +------+ +------+     +-------------+
+                                  |       |
+                              +------+ +------+
+                              | eth8 | | eth9 |
+                              +------+ +------+
+
+Packets received on an enslaved device are switched to the VRF device
+using an rx_handler, which gives the impression that packets flow through
+the VRF device. Similarly, on egress, routing rules are used to send packets
+to the VRF device driver before getting sent out the actual interface. This
+allows tcpdump on a VRF device to capture all packets into and out of the
+VRF as a whole.[1] Similarly, netfilter [2] and tc rules can be applied
+using the VRF device to specify rules that apply to the VRF domain as a whole.
+
+[1] Packets in the forwarded state do not flow through the device, so those
+    packets are not seen by tcpdump. Will revisit this limitation in a
+    future release.
+
+[2] Iptables on ingress is limited to NF_INET_PRE_ROUTING only with skb->dev
+    set to real ingress device and egress is limited to NF_INET_POST_ROUTING.
+    Will revisit this limitation in a future release.
+
+
+Setup
+-----
+1. VRF device is created with an association to a FIB table.
+   e.g, ip link add vrf-blue type vrf table 10
+        ip link set dev vrf-blue up
+
+2. Rules are added that send lookups to the associated FIB table when the
+   iif or oif is the VRF device. e.g.,
+       ip ru add oif vrf-blue table 10
+       ip ru add iif vrf-blue table 10
+
+   Set the default route for the table (and hence default route for the VRF).
+   e.g, ip route add table 10 prohibit default
+
+3. Enslave L3 interfaces to a VRF device.
+   e.g,  ip link set dev eth1 master vrf-blue
+
+   Local and connected routes for enslaved devices are automatically moved to
+   the table associated with VRF device. Any additional routes depending on
+   the enslaved device will need to be reinserted following the enslavement.
+
+4. Additional VRF routes are added to associated table.
+   e.g., ip route add table 10 ...
+
+
+Applications
+------------
+Applications that are to work within a VRF need to bind their socket to the
+VRF device:
+
+    setsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, dev, strlen(dev)+1);
+
+or to specify the output device using cmsg and IP_PKTINFO.
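+
+For illustration, a minimal sketch of the first approach (the helper function
+and the "vrf-blue" device name are hypothetical; error handling is trimmed):
+
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/socket.h>
+
+    /* Returns a TCP socket whose routing lookups use the VRF's table.
+     * Note: setting SO_BINDTODEVICE requires CAP_NET_RAW. */
+    int vrf_socket(const char *dev)          /* e.g. dev = "vrf-blue" */
+    {
+        int sd = socket(AF_INET, SOCK_STREAM, 0);
+
+        if (setsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, dev, strlen(dev) + 1) < 0)
+            perror("setsockopt(SO_BINDTODEVICE)");
+        return sd;
+    }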
+
+
+Limitations
+-----------
+VRF device currently only works for IPv4. Support for IPv6 is under development.
+
+Index of original ingress interface is not available via cmsg. Will address
+soon.
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index 62328d7..b0e911e 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -979,20 +979,45 @@
 (alternatively, the runtime_suspend() callback will have to check if the
 device should really be suspended and return -EAGAIN if that is not the case).
 
-The runtime PM of PCI devices is disabled by default.  It is also blocked by
-pci_pm_init() that runs the pm_runtime_forbid() helper function.  If a PCI
-driver implements the runtime PM callbacks and intends to use the runtime PM
-framework provided by the PM core and the PCI subsystem, it should enable this
-feature by executing the pm_runtime_enable() helper function.  However, the
-driver should not call the pm_runtime_allow() helper function unblocking
-the runtime PM of the device.  Instead, it should allow user space or some
-platform-specific code to do that (user space can do it via sysfs), although
-once it has called pm_runtime_enable(), it must be prepared to handle the
+The runtime PM of PCI devices is enabled by default by the PCI core.  PCI
+device drivers do not need to enable it and should not attempt to do so.
+However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid()
+helper function.  In addition to that, the runtime PM usage counter of
+each PCI device is incremented by local_pci_probe() before executing the
+probe callback provided by the device's driver.
+
+If a PCI driver implements the runtime PM callbacks and intends to use the
+runtime PM framework provided by the PM core and the PCI subsystem, it needs
+to decrement the device's runtime PM usage counter in its probe callback
+function.  If it doesn't do that, the counter will always be different from
+zero for the device and it will never be runtime-suspended.  The simplest
+way to do that is by calling pm_runtime_put_noidle(), but if the driver
+wants to schedule an autosuspend right away, for example, it may call
+pm_runtime_put_autosuspend() instead for this purpose.  Generally, it
+just needs to call a function that decrements the device's usage counter
+from its probe routine to make runtime PM work for the device.
+
+It is important to remember that the driver's runtime_suspend() callback
+may be executed right after the usage counter has been decremented, because
+user space may already have caused the pm_runtime_allow() helper function
+unblocking the runtime PM of the device to run via sysfs, so the driver must
+be prepared to cope with that.
+
+The driver itself should not call pm_runtime_allow(), though.  Instead, it
+should let user space or some platform-specific code do that (user space can
+do it via sysfs as stated above), but it must be prepared to handle the
 runtime PM of the device correctly as soon as pm_runtime_allow() is called
-(which may happen at any time).  [It also is possible that user space causes
-pm_runtime_allow() to be called via sysfs before the driver is loaded, so in
-fact the driver has to be prepared to handle the runtime PM of the device as
-soon as it calls pm_runtime_enable().]
+(which may happen at any time, even before the driver is loaded).
+
+When the driver's remove callback runs, it has to balance the decrement of
+the device's runtime PM usage counter made at probe time.  For this reason,
+if it has decremented the counter in its probe callback, it must run
+pm_runtime_get_noresume() in its remove callback.  [Since the core carries
+out a runtime resume of the device and bumps up the device's usage counter
+before running the driver's remove callback, the runtime PM of the device
+is effectively disabled for the duration of the remove execution and all
+runtime PM helper functions incrementing the device's usage counter are
+then effectively equivalent to pm_runtime_get_noresume().]
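+
+As a minimal sketch of the above (the "foo" driver and its use of
+pcim_enable_device() are hypothetical; only the runtime PM calls illustrate
+the rules described in this section):
+
+	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+	{
+		int err = pcim_enable_device(pdev);
+
+		if (err)
+			return err;
+
+		/* Balance the usage counter bump done by local_pci_probe() so
+		 * the device may be runtime-suspended once user space runs
+		 * pm_runtime_allow() via sysfs. */
+		pm_runtime_put_noidle(&pdev->dev);
+		return 0;
+	}
+
+	static void foo_remove(struct pci_dev *pdev)
+	{
+		/* Balance the decrement made in foo_probe(); the core has
+		 * already resumed the device and bumped its usage counter. */
+		pm_runtime_get_noresume(&pdev->dev);
+	}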
 
 The runtime PM framework works by processing requests to suspend or resume
 devices, or to check if they are idle (in which cases it is reasonable to
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index 2bc8abc..6c6247a 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -18,6 +18,7 @@
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__        /* For PPC64, to get LL64 types */
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index f4cb0b2..477927b 100644
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -15,8 +15,8 @@
 
 DEFINE_STATIC_KEY_TRUE(key);
 DEFINE_STATIC_KEY_FALSE(key);
-static_key_likely()
-statick_key_unlikely()
+static_branch_likely()
+static_branch_unlikely()
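+
+A minimal usage sketch (the key name and the functions called on each path
+are hypothetical):
+
+DEFINE_STATIC_KEY_FALSE(my_key);
+
+if (static_branch_unlikely(&my_key))
+	do_slow_path();		/* rarely taken; patched in when enabled */
+else
+	do_fast_path();
+
+/* flip the branch at run time: */
+static_branch_enable(&my_key);
+static_branch_disable(&my_key);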
 
 0) Abstract
 
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 6294b51..809ab6e 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -54,13 +54,15 @@
 --------------
 
 The default queuing discipline to use for network devices. This allows
-overriding the default queue discipline of pfifo_fast with an
-alternative. Since the default queuing discipline is created with the
-no additional parameters so is best suited to queuing disciplines that
-work well without configuration like stochastic fair queue (sfq),
-CoDel (codel) or fair queue CoDel (fq_codel). Don't use queuing disciplines
-like Hierarchical Token Bucket or Deficit Round Robin which require setting
-up classes and bandwidths.
+overriding the default of pfifo_fast with an alternative. Since the default
+queuing discipline is created without additional parameters, it is best suited
+to queuing disciplines that work well without configuration like stochastic
+fair queue (sfq), CoDel (codel) or fair queue CoDel (fq_codel). Don't use
+queuing disciplines like Hierarchical Token Bucket or Deficit Round Robin
+which require setting up classes and bandwidths. Note that physical multiqueue
+interfaces still use mq as root qdisc, which in turn uses this default for its
+leaves. Virtual devices (like e.g. lo or veth) ignore this setting and instead
+default to noqueue.
 Default: pfifo_fast
 
 busy_read
diff --git a/Documentation/thermal/power_allocator.txt b/Documentation/thermal/power_allocator.txt
index c3797b5..a1ce223 100644
--- a/Documentation/thermal/power_allocator.txt
+++ b/Documentation/thermal/power_allocator.txt
@@ -4,7 +4,7 @@
 Trip points
 -----------
 
-The governor requires the following two passive trip points:
+The governor works optimally with the following two passive trip points:
 
 1.  "switch on" trip point: temperature above which the governor
     control loop starts operating.  This is the first passive trip
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index c1f6864..10f062e 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -180,6 +180,7 @@
     |---temp:			Current temperature
     |---mode:			Working mode of the thermal zone
     |---policy:			Thermal governor used for this zone
+    |---available_policies:	Available thermal governors for this zone
     |---trip_point_[0-*]_temp:	Trip point temperature
     |---trip_point_[0-*]_type:	Trip point type
     |---trip_point_[0-*]_hyst:	Hysteresis value for this trip point
@@ -256,6 +257,10 @@
 	One of the various thermal governors used for a particular zone.
 	RW, Required
 
+available_policies
+	Available thermal governors which can be used for a particular zone.
+	RO, Required
+
 trip_point_[0-*]_temp
 	The temperature above which trip point will be fired.
 	Unit: millidegree Celsius
@@ -417,6 +422,7 @@
     |---temp:			37000
     |---mode:			enabled
     |---policy:			step_wise
+    |---available_policies:	step_wise fair_share
     |---trip_point_0_temp:	100000
     |---trip_point_0_type:	critical
     |---trip_point_1_temp:	80000
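A hedged user-space sketch of how the new read-only attribute pairs with the writable policy file, assuming thermal_zone0 exists; error handling is kept minimal:

#include <stdio.h>

#define ZONE "/sys/class/thermal/thermal_zone0/"

int main(void)
{
	char buf[128] = "";
	FILE *f = fopen(ZONE "available_policies", "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("governors for zone0: %s", buf);	/* e.g. "step_wise fair_share\n" */
	if (f)
		fclose(f);

	/* Selecting one of the listed governors is a plain write to "policy". */
	f = fopen(ZONE "policy", "w");
	if (f) {
		fputs("step_wise\n", f);
		fclose(f);
	}
	return 0;
}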
diff --git a/Documentation/watchdog/src/watchdog-test.c b/Documentation/watchdog/src/watchdog-test.c
index 3da8229..fcdde8f 100644
--- a/Documentation/watchdog/src/watchdog-test.c
+++ b/Documentation/watchdog/src/watchdog-test.c
@@ -41,6 +41,7 @@
 int main(int argc, char *argv[])
 {
     int flags;
+    unsigned int ping_rate = 1;
 
     fd = open("/dev/watchdog", O_WRONLY);
 
@@ -63,22 +64,33 @@
 	    fprintf(stderr, "Watchdog card enabled.\n");
 	    fflush(stderr);
 	    goto end;
+	} else if (!strncasecmp(argv[1], "-t", 2) && argv[2]) {
+	    flags = atoi(argv[2]);
+	    ioctl(fd, WDIOC_SETTIMEOUT, &flags);
+	    fprintf(stderr, "Watchdog timeout set to %u seconds.\n", flags);
+	    fflush(stderr);
+	    goto end;
+	} else if (!strncasecmp(argv[1], "-p", 2) && argv[2]) {
+	    ping_rate = strtoul(argv[2], NULL, 0);
+	    fprintf(stderr, "Watchdog ping rate set to %u seconds.\n", ping_rate);
+	    fflush(stderr);
 	} else {
-	    fprintf(stderr, "-d to disable, -e to enable.\n");
+	    fprintf(stderr, "-d to disable, -e to enable, -t <n> to set " \
+		"the timeout,\n-p <n> to set the ping rate, and \n");
 	    fprintf(stderr, "run by itself to tick the card.\n");
 	    fflush(stderr);
 	    goto end;
 	}
-    } else {
-	fprintf(stderr, "Watchdog Ticking Away!\n");
-	fflush(stderr);
     }
 
+    fprintf(stderr, "Watchdog Ticking Away!\n");
+    fflush(stderr);
+
     signal(SIGINT, term);
 
     while(1) {
 	keep_alive();
-	sleep(1);
+	sleep(ping_rate);
     }
 end:
     close(fd);
diff --git a/MAINTAINERS b/MAINTAINERS
index 310da42..e412601 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -615,9 +615,8 @@
 F:	drivers/hwmon/fam15h_power.c
 
 AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER
-M:	Thomas Dahlmann <dahlmann.thomas@arcor.de>
 L:	linux-geode@lists.infradead.org (moderated for non-subscribers)
-S:	Supported
+S:	Orphan
 F:	drivers/usb/gadget/udc/amd5536udc.*
 
 AMD GEODE PROCESSOR/CHIPSET SUPPORT
@@ -808,6 +807,13 @@
 F:	drivers/video/fbdev/arcfb.c
 F:	drivers/video/fbdev/core/fb_defio.c
 
+ARCNET NETWORK LAYER
+M:	Michael Grzeschik <m.grzeschik@pengutronix.de>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/arcnet/
+F:	include/uapi/linux/if_arcnet.h
+
 ARM MFM AND FLOPPY DRIVERS
 M:	Ian Molton <spyro@f2s.com>
 S:	Maintained
@@ -3394,7 +3400,6 @@
 
 DIGI EPCA PCI PRODUCTS
 M:	Lidza Louina <lidza.louina@gmail.com>
-M:	Mark Hounschell <markh@compro.net>
 M:	Daeseok Youn <daeseok.youn@gmail.com>
 L:	driverdev-devel@linuxdriverproject.org
 S:	Maintained
@@ -5541,7 +5546,7 @@
 INTEL WIRELESS WIFI LINK (iwlwifi)
 M:	Johannes Berg <johannes.berg@intel.com>
 M:	Emmanuel Grumbach <emmanuel.grumbach@intel.com>
-M:	Intel Linux Wireless <ilw@linux.intel.com>
+M:	Intel Linux Wireless <linuxwifi@intel.com>
 L:	linux-wireless@vger.kernel.org
 W:	http://intellinuxwireless.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
@@ -5952,7 +5957,7 @@
 KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V
 M:	Joerg Roedel <joro@8bytes.org>
 L:	kvm@vger.kernel.org
-W:	http://kvm.qumranet.com
+W:	http://www.linux-kvm.org/
 S:	Maintained
 F:	arch/x86/include/asm/svm.h
 F:	arch/x86/kvm/svm.c
@@ -5960,7 +5965,7 @@
 KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
 M:	Alexander Graf <agraf@suse.com>
 L:	kvm-ppc@vger.kernel.org
-W:	http://kvm.qumranet.com
+W:	http://www.linux-kvm.org/
 T:	git git://github.com/agraf/linux-2.6.git
 S:	Supported
 F:	arch/powerpc/include/asm/kvm*
@@ -6088,6 +6093,13 @@
 F:	drivers/auxdisplay/ks0108.c
 F:	include/linux/ks0108.h
 
+L3MDEV
+M:	David Ahern <dsa@cumulusnetworks.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	net/l3mdev
+F:	include/net/l3mdev.h
+
 LAPB module
 L:	linux-x25@vger.kernel.org
 S:	Orphan
@@ -6452,11 +6464,11 @@
 LTP (Linux Test Project)
 M:	Mike Frysinger <vapier@gentoo.org>
 M:	Cyril Hrubis <chrubis@suse.cz>
-M:	Wanlong Gao <gaowanlong@cn.fujitsu.com>
+M:	Wanlong Gao <wanlong.gao@gmail.com>
 M:	Jan Stancek <jstancek@redhat.com>
 M:	Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
 M:	Alexey Kodanev <alexey.kodanev@oracle.com>
-L:	ltp-list@lists.sourceforge.net (subscribers-only)
+L:	ltp@lists.linux.it (subscribers-only)
 W:	http://linux-test-project.github.io/
 T:	git git://github.com/linux-test-project/ltp.git
 S:	Maintained
@@ -6789,6 +6801,14 @@
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 F:	drivers/net/ethernet/mellanox/mlxsw/
 
+MEMBARRIER SUPPORT
+M:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+L:	linux-kernel@vger.kernel.org
+S:	Supported
+F:	kernel/membarrier.c
+F:	include/uapi/linux/membarrier.h
+
 MEMORY MANAGEMENT
 L:	linux-mm@kvack.org
 W:	http://www.linux-mm.org
@@ -6958,6 +6978,7 @@
 L:	linux-wpan@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ieee802154/mrf24j40.c
+F:	Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt
 
 MSI LAPTOP SUPPORT
 M:	"Lee, Chun-Yi" <jlee@suse.com>
@@ -7292,7 +7313,6 @@
 F:	drivers/net/
 F:	include/linux/if_*
 F:	include/linux/netdevice.h
-F:	include/linux/arcdevice.h
 F:	include/linux/etherdevice.h
 F:	include/linux/fcdevice.h
 F:	include/linux/fddidevice.h
@@ -7396,6 +7416,7 @@
 M:	Jon Mason <jdmason@kudzu.us>
 M:	Dave Jiang <dave.jiang@intel.com>
 M:	Allen Hubbe <Allen.Hubbe@emc.com>
+L:	linux-ntb@googlegroups.com
 S:	Supported
 W:	https://github.com/jonmason/ntb/wiki
 T:	git git://github.com/jonmason/ntb.git
@@ -7407,6 +7428,7 @@
 NTB INTEL DRIVER
 M:	Jon Mason <jdmason@kudzu.us>
 M:	Dave Jiang <dave.jiang@intel.com>
+L:	linux-ntb@googlegroups.com
 S:	Supported
 W:	https://github.com/jonmason/ntb/wiki
 T:	git git://github.com/jonmason/ntb.git
@@ -8490,7 +8512,6 @@
 F:	drivers/net/ethernet/qlogic/qla3xxx.*
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:	Shahed Shaikh <shahed.shaikh@qlogic.com>
 M:	Dept-GELinuxNICDev@qlogic.com
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -8856,6 +8877,13 @@
 F:	drivers/net/wireless/rtlwifi/
 F:	drivers/net/wireless/rtlwifi/rtl8192ce/
 
+RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
+M:	Jes Sorensen <Jes.Sorensen@redhat.com>
+L:	linux-wireless@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8723au-mac80211
+S:	Maintained
+F:	drivers/net/wireless/realtek/rtl8xxxu/
+
 S3 SAVAGE FRAMEBUFFER DRIVER
 M:	Antonino Daplas <adaplas@gmail.com>
 L:	linux-fbdev@vger.kernel.org
@@ -9894,8 +9922,8 @@
 STAGING - LUSTRE PARALLEL FILESYSTEM
 M:	Oleg Drokin <oleg.drokin@intel.com>
 M:	Andreas Dilger <andreas.dilger@intel.com>
-L:	HPDD-discuss@lists.01.org (moderated for non-subscribers)
-W:	http://lustre.opensfs.org/
+L:	lustre-devel@lists.lustre.org (moderated for non-subscribers)
+W:	http://wiki.lustre.org/
 S:	Maintained
 F:	drivers/staging/lustre
 
@@ -10328,6 +10356,16 @@
 F:	include/linux/cpu_cooling.h
 F:	Documentation/devicetree/bindings/thermal/
 
+THERMAL/CPU_COOLING
+M:	Amit Daniel Kachhap <amit.kachhap@gmail.com>
+M:	Viresh Kumar <viresh.kumar@linaro.org>
+M:	Javi Merino <javi.merino@arm.com>
+L:	linux-pm@vger.kernel.org
+S:	Supported
+F:	Documentation/thermal/cpu-cooling-api.txt
+F:	drivers/thermal/cpu_cooling.c
+F:	include/linux/cpu_cooling.h
+
 THINGM BLINK(1) USB RGB LED DRIVER
 M:	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 S:	Maintained
@@ -11177,7 +11215,7 @@
 F:	include/linux/vlynq.h
 
 VME SUBSYSTEM
-M:	Martyn Welch <martyn.welch@ge.com>
+M:	Martyn Welch <martyn@welchs.me.uk>
 M:	Manohar Vanga <manohar.vanga@gmail.com>
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:	devel@driverdev.osuosl.org
@@ -11229,7 +11267,6 @@
 M:	Liam Girdwood <lgirdwood@gmail.com>
 M:	Mark Brown <broonie@kernel.org>
 L:	linux-kernel@vger.kernel.org
-W:	http://opensource.wolfsonmicro.com/node/15
 W:	http://www.slimlogic.co.uk/?p=48
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git
 S:	Supported
@@ -11242,7 +11279,7 @@
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/vrf.c
-F:	include/net/vrf.h
+F:	Documentation/networking/vrf.txt
 
 VT1211 HARDWARE MONITOR DRIVER
 M:	Juerg Haefliger <juergh@gmail.com>
@@ -11358,17 +11395,15 @@
 M:	Mark Brown <broonie@kernel.org>
 M:	Liam Girdwood <lrg@slimlogic.co.uk>
 L:	linux-input@vger.kernel.org
-T:	git git://opensource.wolfsonmicro.com/linux-2.6-touch
-W:	http://opensource.wolfsonmicro.com/node/7
+W:	https://github.com/CirrusLogic/linux-drivers/wiki
 S:	Supported
 F:	drivers/input/touchscreen/*wm97*
 F:	include/linux/wm97xx.h
 
 WOLFSON MICROELECTRONICS DRIVERS
 L:	patches@opensource.wolfsonmicro.com
-T:	git git://opensource.wolfsonmicro.com/linux-2.6-asoc
-T:	git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
-W:	http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
+T:	git https://github.com/CirrusLogic/linux-drivers.git
+W:	https://github.com/CirrusLogic/linux-drivers/wiki
 S:	Supported
 F:	Documentation/hwmon/wm83??
 F:	arch/arm/mach-s3c64xx/mach-crag6410*
diff --git a/Makefile b/Makefile
index f2d2706..1d341eb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
-PATCHLEVEL = 2
+PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc3
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index f05bdb4..ff40491 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -297,7 +297,9 @@
 					     unsigned long size)
 {
 	return ioremap(offset, size);
-} 
+}
+
+#define ioremap_uc ioremap_nocache
 
 static inline void iounmap(volatile void __iomem *addr)
 {
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 2804648..2d6efcf 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -117,6 +117,6 @@
 	}
 
 	irq_enter();
-	generic_handle_irq_desc(irq, desc);
+	generic_handle_irq_desc(desc);
 	irq_exit();
 }
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index cded02c..5f387ee 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -242,7 +242,12 @@
 
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
-	struct pci_dev *dev;
+	struct pci_dev *dev = bus->self;
+
+	if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
+	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+		pci_read_bridge_bases(bus);
+	}
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		pdev_save_srm_config(dev);
diff --git a/arch/alpha/lib/udelay.c b/arch/alpha/lib/udelay.c
index 69d52aa..f2d81ff 100644
--- a/arch/alpha/lib/udelay.c
+++ b/arch/alpha/lib/udelay.c
@@ -30,6 +30,7 @@
 		"	bgt %0,1b"
 		: "=&r" (tmp), "=r" (loops) : "1"(loops));
 }
+EXPORT_SYMBOL(__delay);
 
 #ifdef CONFIG_SMP
 #define LPJ	 cpu_data[smp_processor_id()].loops_per_jiffy
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index d9e44b6..4ffd185 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -252,7 +252,7 @@
 
 static int idu_first_irq;
 
-static void idu_cascade_isr(unsigned int __core_irq, struct irq_desc *desc)
+static void idu_cascade_isr(struct irq_desc *desc)
 {
 	struct irq_domain *domain = irq_desc_get_handler_data(desc);
 	unsigned int core_irq = irq_desc_get_irq(desc);
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index ad9825d..0a77b19 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -402,6 +402,8 @@
 	unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
 	if (num_cores > 2)
 		arc_set_core_freq(50 * 1000000);
+	else if (num_cores == 2)
+		arc_set_core_freq(75 * 1000000);
 #endif
 
 	switch (arc_get_core_freq()/1000000) {
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 7451b44..2c2b28e 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -54,6 +54,14 @@
 LD		+= -EL
 endif
 
+#
+# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
+# later may result in code being generated that handles signed short and signed
+# char struct members incorrectly. So disable it.
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
+#
+KBUILD_CFLAGS	+= $(call cc-option,-fno-ipa-sra)
+
 # This selects which instruction set is used.
 # Note that GCC does not numerically define an architecture version
 # macro, but instead defines a whole series of macros which makes
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi
index 4d28fc3..5dd084f 100644
--- a/arch/arm/boot/dts/am335x-phycore-som.dtsi
+++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi
@@ -252,10 +252,10 @@
 		};
 
 		vdd1_reg: regulator@2 {
-			/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
+			/* VDD_MPU voltage limits 0.95V - 1.325V with +/-4% tolerance */
 			regulator-name = "vdd_mpu";
 			regulator-min-microvolt = <912500>;
-			regulator-max-microvolt = <1312500>;
+			regulator-max-microvolt = <1378000>;
 			regulator-boot-on;
 			regulator-always-on;
 		};
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 0447c04a..d83ff9c 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -591,6 +591,7 @@
 			cpts_clock_mult = <0x80000000>;
 			cpts_clock_shift = <29>;
 			ranges;
+			syscon = <&scm_conf>;
 
 			davinci_mdio: mdio@4a101000 {
 				compatible = "ti,am4372-mdio","ti,davinci_mdio";
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 3a05b94..568adf5 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -98,13 +98,6 @@
 		pinctrl-0 = <&extcon_usb1_pins>;
 	};
 
-	extcon_usb2: extcon_usb2 {
-		compatible = "linux,extcon-usb-gpio";
-		id-gpio = <&gpio7 24 GPIO_ACTIVE_HIGH>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&extcon_usb2_pins>;
-	};
-
 	hdmi0: connector {
 		compatible = "hdmi-connector";
 		label = "hdmi";
@@ -326,12 +319,6 @@
 		>;
 	};
 
-	extcon_usb2_pins: extcon_usb2_pins {
-		pinctrl-single,pins = <
-			0x3e8 (PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_ctsn.gpio7_24 */
-		>;
-	};
-
 	tpd12s015_pins: pinmux_tpd12s015_pins {
 		pinctrl-single,pins = <
 			0x3b0 (PIN_OUTPUT | MUX_MODE14)		/* gpio7_10 CT_CP_HPD */
@@ -432,7 +419,7 @@
 				};
 
 				ldo3_reg: ldo3 {
-					/* VDDA_1V8_PHY */
+					/* VDDA_1V8_PHYA */
 					regulator-name = "ldo3";
 					regulator-min-microvolt = <1800000>;
 					regulator-max-microvolt = <1800000>;
@@ -440,6 +427,15 @@
 					regulator-boot-on;
 				};
 
+				ldo4_reg: ldo4 {
+					/* VDDA_1V8_PHYB */
+					regulator-name = "ldo4";
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
 				ldo9_reg: ldo9 {
 					/* VDD_RTC */
 					regulator-name = "ldo9";
@@ -495,6 +491,14 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
+
+		extcon_usb2: tps659038_usb {
+			compatible = "ti,palmas-usb-vid";
+			ti,enable-vbus-detection;
+			ti,enable-id-detection;
+			id-gpios = <&gpio7 24 GPIO_ACTIVE_HIGH>;
+		};
+
 	};
 
 	tmp102: tmp102@48 {
@@ -517,7 +521,8 @@
 	mcp_rtc: rtc@6f {
 		compatible = "microchip,mcp7941x";
 		reg = <0x6f>;
-		interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>;  /* IRQ_SYS_1N */
+		interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
+				      <&dra7_pmx_core 0x424>;
 
 		pinctrl-names = "default";
 		pinctrl-0 = <&mcp79410_pins_default>;
@@ -579,7 +584,6 @@
 	pinctrl-0 = <&mmc1_pins_default>;
 
 	vmmc-supply = <&ldo1_reg>;
-	vmmc_aux-supply = <&vdd_3v3>;
 	bus-width = <4>;
 	cd-gpios = <&gpio6 27 0>; /* gpio 219 */
 };
@@ -623,6 +627,14 @@
 };
 
 &usb2 {
+	/*
+	 * Stand alone usage is peripheral only.
+	 * However, with some resistor modifications
+	 * this port can be used via expansion connectors
+	 * as "host" or "dual-role". If so, provide
+	 * the necessary dr_mode override in the expansion
+	 * board's DT.
+	 */
 	dr_mode = "peripheral";
 };
 
@@ -681,7 +693,7 @@
 
 &hdmi {
 	status = "ok";
-	vdda-supply = <&ldo3_reg>;
+	vdda-supply = <&ldo4_reg>;
 
 	pinctrl-names = "default";
 	pinctrl-0 = <&hdmi_pins>;
diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts
index 92bacd3..109fd47 100644
--- a/arch/arm/boot/dts/dm8148-evm.dts
+++ b/arch/arm/boot/dts/dm8148-evm.dts
@@ -19,10 +19,10 @@
 
 &cpsw_emac0 {
 	phy_id = <&davinci_mdio>, <0>;
-	phy-mode = "mii";
+	phy-mode = "rgmii";
 };
 
 &cpsw_emac1 {
 	phy_id = <&davinci_mdio>, <1>;
-	phy-mode = "mii";
+	phy-mode = "rgmii";
 };
diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts
index 8c4bbc7..79838dd 100644
--- a/arch/arm/boot/dts/dm8148-t410.dts
+++ b/arch/arm/boot/dts/dm8148-t410.dts
@@ -8,7 +8,7 @@
 #include "dm814x.dtsi"
 
 / {
-	model = "DM8148 EVM";
+	model = "HP t410 Smart Zero Client";
 	compatible = "hp,t410", "ti,dm8148";
 
 	memory {
@@ -19,10 +19,10 @@
 
 &cpsw_emac0 {
 	phy_id = <&davinci_mdio>, <0>;
-	phy-mode = "mii";
+	phy-mode = "rgmii";
 };
 
 &cpsw_emac1 {
 	phy_id = <&davinci_mdio>, <1>;
-	phy-mode = "mii";
+	phy-mode = "rgmii";
 };
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index 972c9c9..7988b42 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -181,9 +181,9 @@
 				ti,hwmods = "timer3";
 			};
 
-			control: control@160000 {
+			control: control@140000 {
 				compatible = "ti,dm814-scm", "simple-bus";
-				reg = <0x160000 0x16d000>;
+				reg = <0x140000 0x16d000>;
 				#address-cells = <1>;
 				#size-cells = <1>;
 				ranges = <0 0x160000 0x16d000>;
@@ -321,9 +321,9 @@
 				mac-address = [ 00 00 00 00 00 00 ];
 			};
 
-			phy_sel: cpsw-phy-sel@0x48160650 {
+			phy_sel: cpsw-phy-sel@48140650 {
 				compatible = "ti,am3352-cpsw-phy-sel";
-				reg= <0x48160650 0x4>;
+				reg= <0x48140650 0x4>;
 				reg-names = "gmii-sel";
 			};
 		};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 5d65db9..8fedddc 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -120,9 +120,10 @@
 					reg = <0x0 0x1400>;
 					#address-cells = <1>;
 					#size-cells = <1>;
+					ranges = <0 0x0 0x1400>;
 
 					pbias_regulator: pbias_regulator {
-						compatible = "ti,pbias-omap";
+						compatible = "ti,pbias-dra7", "ti,pbias-omap";
 						reg = <0xe00 0x4>;
 						syscon = <&scm_conf>;
 						pbias_mmc_reg: pbias_mmc_omap5 {
@@ -1417,7 +1418,7 @@
 			ti,irqs-safe-map = <0>;
 		};
 
-		mac: ethernet@4a100000 {
+		mac: ethernet@48484000 {
 			compatible = "ti,dra7-cpsw","ti,cpsw";
 			ti,hwmods = "gmac";
 			clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>;
@@ -1447,6 +1448,7 @@
 				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
 			ranges;
+			syscon = <&scm_conf>;
 			status = "disabled";
 
 			davinci_mdio: mdio@48485000 {
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 2390f38..798dda0 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -56,6 +56,7 @@
 					reg = <0x270 0x240>;
 					#address-cells = <1>;
 					#size-cells = <1>;
+					ranges = <0 0x270 0x240>;
 
 					scm_clocks: clocks {
 						#address-cells = <1>;
@@ -63,7 +64,7 @@
 					};
 
 					pbias_regulator: pbias_regulator {
-						compatible = "ti,pbias-omap";
+						compatible = "ti,pbias-omap2", "ti,pbias-omap";
 						reg = <0x230 0x4>;
 						syscon = <&scm_conf>;
 						pbias_mmc_reg: pbias_mmc_omap2430 {
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index a547411..67659a0 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -202,7 +202,7 @@
 
 	tfp410_pins: pinmux_tfp410_pins {
 		pinctrl-single,pins = <
-			0x194 (PIN_OUTPUT | MUX_MODE4)	/* hdq_sio.gpio_170 */
+			0x196 (PIN_OUTPUT | MUX_MODE4)	/* hdq_sio.gpio_170 */
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index d5e5cd4..2230e1c 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -78,12 +78,6 @@
 		>;
 	};
 
-	smsc9221_pins: pinmux_smsc9221_pins {
-		pinctrl-single,pins = <
-			0x1a2 (PIN_INPUT | MUX_MODE4)		/* mcspi1_cs2.gpio_176 */
-		>;
-	};
-
 	i2c1_pins: pinmux_i2c1_pins {
 		pinctrl-single,pins = <
 			0x18a (PIN_INPUT | MUX_MODE0)   /* i2c1_scl.i2c1_scl */
diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
index e458c21..5ad688c 100644
--- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
@@ -156,6 +156,12 @@
 			OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0)	/* uart2_rx.uart2_rx */
 		>;
 	};
+
+	smsc9221_pins: pinmux_smsc9221_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT | MUX_MODE4)	/* mcspi1_cs2.gpio_176 */
+		>;
+	};
 };
 
 &omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 69a40cf..8a2b253 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -113,10 +113,22 @@
 				};
 
 				scm_conf: scm_conf@270 {
-					compatible = "syscon";
+					compatible = "syscon", "simple-bus";
 					reg = <0x270 0x330>;
 					#address-cells = <1>;
 					#size-cells = <1>;
+					ranges = <0 0x270 0x330>;
+
+					pbias_regulator: pbias_regulator {
+						compatible = "ti,pbias-omap3", "ti,pbias-omap";
+						reg = <0x2b0 0x4>;
+						syscon = <&scm_conf>;
+						pbias_mmc_reg: pbias_mmc_omap2430 {
+							regulator-name = "pbias_mmc_omap2430";
+							regulator-min-microvolt = <1800000>;
+							regulator-max-microvolt = <3000000>;
+						};
+					};
 
 					scm_clocks: clocks {
 						#address-cells = <1>;
@@ -202,17 +214,6 @@
 			dma-requests = <96>;
 		};
 
-		pbias_regulator: pbias_regulator {
-			compatible = "ti,pbias-omap";
-			reg = <0x2b0 0x4>;
-			syscon = <&scm_conf>;
-			pbias_mmc_reg: pbias_mmc_omap2430 {
-				regulator-name = "pbias_mmc_omap2430";
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <3000000>;
-			};
-		};
-
 		gpio1: gpio@48310000 {
 			compatible = "ti,omap3-gpio";
 			reg = <0x48310000 0x200>;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index abc4473..5a206c1 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -196,9 +196,10 @@
 					reg = <0x5a0 0x170>;
 					#address-cells = <1>;
 					#size-cells = <1>;
+					ranges = <0 0x5a0 0x170>;
 
 					pbias_regulator: pbias_regulator {
-						compatible = "ti,pbias-omap";
+						compatible = "ti,pbias-omap4", "ti,pbias-omap";
 						reg = <0x60 0x4>;
 						syscon = <&omap4_padconf_global>;
 						pbias_mmc_reg: pbias_mmc_omap4 {
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 3cc8f35..3cb030f 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -174,8 +174,8 @@
 
 	i2c5_pins: pinmux_i2c5_pins {
 		pinctrl-single,pins = <
-			0x184 (PIN_INPUT | MUX_MODE0)		/* i2c5_scl */
-			0x186 (PIN_INPUT | MUX_MODE0)		/* i2c5_sda */
+			0x186 (PIN_INPUT | MUX_MODE0)		/* i2c5_scl */
+			0x188 (PIN_INPUT | MUX_MODE0)		/* i2c5_sda */
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 4205a8a..4c04389 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -185,9 +185,10 @@
 					reg = <0x5a0 0xec>;
 					#address-cells = <1>;
 					#size-cells = <1>;
+					ranges = <0 0x5a0 0xec>;
 
 					pbias_regulator: pbias_regulator {
-						compatible = "ti,pbias-omap";
+						compatible = "ti,pbias-omap5", "ti,pbias-omap";
 						reg = <0x60 0x4>;
 						syscon = <&omap5_padconf_global>;
 						pbias_mmc_reg: pbias_mmc_omap5 {
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 2fa7a0d..275c78c 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -158,6 +158,7 @@
 };
 
 &hdmi {
+	ddc-i2c-bus = <&i2c5>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi
index 3efa3b2..6b914e4 100644
--- a/arch/arm/boot/dts/stih407.dtsi
+++ b/arch/arm/boot/dts/stih407.dtsi
@@ -103,48 +103,46 @@
 							 <&clk_s_d0_quadfs 0>,
 							 <&clk_s_d2_quadfs 0>,
 							 <&clk_s_d2_quadfs 0>;
-				ranges;
+			};
 
-				sti-hdmi@8d04000 {
-					compatible = "st,stih407-hdmi";
-					reg = <0x8d04000 0x1000>;
-					reg-names = "hdmi-reg";
-					interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
-					interrupt-names	= "irq";
-					clock-names = "pix",
-						      "tmds",
-						      "phy",
-						      "audio",
-						      "main_parent",
-						      "aux_parent";
+			sti-hdmi@8d04000 {
+				compatible = "st,stih407-hdmi";
+				reg = <0x8d04000 0x1000>;
+				reg-names = "hdmi-reg";
+				interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
+				interrupt-names	= "irq";
+				clock-names = "pix",
+					      "tmds",
+					      "phy",
+					      "audio",
+					      "main_parent",
+					      "aux_parent";
 
-					clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
-						 <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
-						 <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
-						 <&clk_s_d0_flexgen CLK_PCM_0>,
-						 <&clk_s_d2_quadfs 0>,
-						 <&clk_s_d2_quadfs 1>;
+				clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
+					 <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
+					 <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
+					 <&clk_s_d0_flexgen CLK_PCM_0>,
+					 <&clk_s_d2_quadfs 0>,
+					 <&clk_s_d2_quadfs 1>;
 
-					hdmi,hpd-gpio = <&pio5 3>;
-					reset-names = "hdmi";
-					resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
-					ddc = <&hdmiddc>;
+				hdmi,hpd-gpio = <&pio5 3>;
+				reset-names = "hdmi";
+				resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
+				ddc = <&hdmiddc>;
+			};
 
-				};
-
-				sti-hda@8d02000 {
-					compatible = "st,stih407-hda";
-					reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
-					reg-names = "hda-reg", "video-dacs-ctrl";
-					clock-names = "pix",
-						      "hddac",
-						      "main_parent",
-						      "aux_parent";
-					clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
-						 <&clk_s_d2_flexgen CLK_HDDAC>,
-						 <&clk_s_d2_quadfs 0>,
-						 <&clk_s_d2_quadfs 1>;
-				};
+			sti-hda@8d02000 {
+				compatible = "st,stih407-hda";
+				reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
+				reg-names = "hda-reg", "video-dacs-ctrl";
+				clock-names = "pix",
+					      "hddac",
+					      "main_parent",
+					      "aux_parent";
+				clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
+					 <&clk_s_d2_flexgen CLK_HDDAC>,
+					 <&clk_s_d2_quadfs 0>,
+					 <&clk_s_d2_quadfs 1>;
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index 6f40bc9..8c6e61a 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -178,48 +178,46 @@
 							 <&clk_s_d0_quadfs 0>,
 							 <&clk_s_d2_quadfs 0>,
 							 <&clk_s_d2_quadfs 0>;
-				ranges;
+			};
 
-				sti-hdmi@8d04000 {
-					compatible = "st,stih407-hdmi";
-					reg = <0x8d04000 0x1000>;
-					reg-names = "hdmi-reg";
-					interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
-					interrupt-names	= "irq";
-					clock-names = "pix",
-						      "tmds",
-						      "phy",
-						      "audio",
-						      "main_parent",
-						      "aux_parent";
+			sti-hdmi@8d04000 {
+				compatible = "st,stih407-hdmi";
+				reg = <0x8d04000 0x1000>;
+				reg-names = "hdmi-reg";
+				interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
+				interrupt-names	= "irq";
+				clock-names = "pix",
+					      "tmds",
+					      "phy",
+					      "audio",
+					      "main_parent",
+					      "aux_parent";
 
-					clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
-						 <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
-						 <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
-						 <&clk_s_d0_flexgen CLK_PCM_0>,
-						 <&clk_s_d2_quadfs 0>,
-						 <&clk_s_d2_quadfs 1>;
+				clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
+					 <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
+					 <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
+					 <&clk_s_d0_flexgen CLK_PCM_0>,
+					 <&clk_s_d2_quadfs 0>,
+					 <&clk_s_d2_quadfs 1>;
 
-					hdmi,hpd-gpio = <&pio5 3>;
-					reset-names = "hdmi";
-					resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
-					ddc = <&hdmiddc>;
+				hdmi,hpd-gpio = <&pio5 3>;
+				reset-names = "hdmi";
+				resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
+				ddc = <&hdmiddc>;
+			};
 
-				};
-
-				sti-hda@8d02000 {
-					compatible = "st,stih407-hda";
-					reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
-					reg-names = "hda-reg", "video-dacs-ctrl";
-					clock-names = "pix",
-						      "hddac",
-						      "main_parent",
-						      "aux_parent";
-					clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
-						 <&clk_s_d2_flexgen CLK_HDDAC>,
-						 <&clk_s_d2_quadfs 0>,
-						 <&clk_s_d2_quadfs 1>;
-				};
+			sti-hda@8d02000 {
+				compatible = "st,stih407-hda";
+				reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
+				reg-names = "hda-reg", "video-dacs-ctrl";
+				clock-names = "pix",
+					      "hddac",
+					      "main_parent",
+					      "aux_parent";
+				clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
+					 <&clk_s_d2_flexgen CLK_HDDAC>,
+					 <&clk_s_d2_quadfs 0>,
+					 <&clk_s_d2_quadfs 1>;
 			};
 		};
 
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 96dabcb..996aed3 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -95,7 +95,7 @@
 	}
 }
 
-void it8152_irq_demux(unsigned int irq, struct irq_desc *desc)
+void it8152_irq_demux(struct irq_desc *desc)
 {
        int bits_pd, bits_lp, bits_ld;
        int i;
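The same conversion repeats throughout the architecture code above and below: chained handlers lose their explicit irq argument and recover anything they still need from the descriptor. A generic sketch of the new signature, with demo_* names as placeholders for a real cascaded controller:

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

static unsigned int demo_child_irq_base;	/* set when the child domain is created */

static void demo_cascade_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending = 0;	/* would be read from the cascaded controller */
	unsigned int bit;

	chained_irq_enter(chip, desc);
	for_each_set_bit(bit, &pending, BITS_PER_LONG)
		generic_handle_irq(demo_child_irq_base + bit);
	chained_irq_exit(chip, desc);
}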
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 304adea..0e97b4b 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -138,7 +138,7 @@
 	},
 };
 
-static void locomo_handler(unsigned int __irq, struct irq_desc *desc)
+static void locomo_handler(struct irq_desc *desc)
 {
 	struct locomo *lchip = irq_desc_get_chip_data(desc);
 	int req, i;
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 4f29025..3d22494 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -196,10 +196,8 @@
  * active IRQs causes the interrupt output to pulse, the upper levels
  * will call us again if there are more interrupts to process.
  */
-static void
-sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void sa1111_irq_handler(struct irq_desc *desc)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
 	unsigned int stat0, stat1, i;
 	struct sa1111 *sachip = irq_desc_get_handler_data(desc);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
@@ -214,7 +212,7 @@
 	sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1);
 
 	if (stat0 == 0 && stat1 == 0) {
-		do_bad_IRQ(irq, desc);
+		do_bad_IRQ(desc);
 		return;
 	}
 
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 50c84e1..3f15a5c 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -240,7 +240,8 @@
 CONFIG_PINCTRL_SINGLE=y
 CONFIG_DEBUG_GPIO=y
 CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_PCF857X=m
+CONFIG_GPIO_PCA953X=m
+CONFIG_GPIO_PCF857X=y
 CONFIG_GPIO_TWL4030=y
 CONFIG_GPIO_PALMAS=y
 CONFIG_W1=m
@@ -350,6 +351,8 @@
 CONFIG_USB_MUSB_OMAP2PLUS=m
 CONFIG_USB_MUSB_AM35X=m
 CONFIG_USB_MUSB_DSPS=m
+CONFIG_USB_INVENTRA_DMA=y
+CONFIG_USB_TI_CPPI41_DMA=y
 CONFIG_USB_DWC3=m
 CONFIG_USB_TEST=m
 CONFIG_AM335X_PHY_USB=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 7bbf325..b2bc8e1 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -491,11 +491,6 @@
 #endif
 	.endm
 
-	.macro	uaccess_save_and_disable, tmp
-	uaccess_save \tmp
-	uaccess_disable \tmp
-	.endm
-
 	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
 	.macro	ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index b274bde..e7335a9 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -40,6 +40,7 @@
 		"2:\t.asciz " #__file "\n" 			\
 		".popsection\n" 				\
 		".pushsection __bug_table,\"a\"\n"		\
+		".align 2\n"					\
 		"3:\t.word 1b, 2b\n"				\
 		"\t.hword " #__line ", 0\n"			\
 		".popsection");					\
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index e878129..fc8ba16 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -12,6 +12,7 @@
 
 #ifndef __ASSEMBLY__
 #include <asm/barrier.h>
+#include <asm/thread_info.h>
 #endif
 
 /*
@@ -89,7 +90,8 @@
 
 	asm(
 	"mrc	p15, 0, %0, c3, c0	@ get domain"
-	 : "=r" (domain));
+	 : "=r" (domain)
+	 : "m" (current_thread_info()->cpu_domain));
 
 	return domain;
 }
@@ -98,7 +100,7 @@
 {
 	asm volatile(
 	"mcr	p15, 0, %0, c3, c0	@ set domain"
-	  : : "r" (val));
+	  : : "r" (val) : "memory");
 	isb();
 }
 
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index d36a73d7..076777f 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -106,7 +106,7 @@
 struct pci_dev;
 struct pci_sys_data;
 
-extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc);
+extern void it8152_irq_demux(struct irq_desc *desc);
 extern void it8152_init_irq(void);
 extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
 extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
index af79da4..9beb929 100644
--- a/arch/arm/include/asm/hw_irq.h
+++ b/arch/arm/include/asm/hw_irq.h
@@ -11,12 +11,6 @@
 	pr_crit("unexpected IRQ trap at vector %02x\n", irq);
 }
 
-void set_irq_flags(unsigned int irq, unsigned int flags);
-
-#define IRQF_VALID	(1 << 0)
-#define IRQF_PROBE	(1 << 1)
-#define IRQF_NOAUTOEN	(1 << 2)
-
 #define ARCH_IRQ_INIT_FLAGS	(IRQ_NOREQUEST | IRQ_NOPROBE)
 
 #endif
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index dcba0fa..c4072d9 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -29,21 +29,18 @@
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
-#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
-#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
-#else
-#define KVM_MAX_VCPUS 0
-#endif
-
 #define KVM_USER_MEM_SLOTS 32
 #define KVM_PRIVATE_MEM_SLOTS 4
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HAVE_ONE_REG
+#define KVM_HALT_POLL_NS_DEFAULT 500000
 
 #define KVM_VCPU_MAX_FEATURES 2
 
 #include <kvm/arm_vgic.h>
 
+#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
+
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -148,6 +145,7 @@
 
 struct kvm_vcpu_stat {
 	u32 halt_successful_poll;
+	u32 halt_attempted_poll;
 	u32 halt_wakeup;
 };
 
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 2092ee1..de4634b 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -23,10 +23,10 @@
 /*
  * This is for easy migration, but should be changed in the source
  */
-#define do_bad_IRQ(irq,desc)				\
+#define do_bad_IRQ(desc)				\
 do {							\
 	raw_spin_lock(&desc->lock);			\
-	handle_bad_irq(irq, desc);			\
+	handle_bad_irq(desc);				\
 	raw_spin_unlock(&desc->lock);			\
 } while(0)
 
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d0a1119..776757d 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -25,7 +25,6 @@
 struct task_struct;
 
 #include <asm/types.h>
-#include <asm/domain.h>
 
 typedef unsigned long mm_segment_t;
 
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 32640c4..7cba573 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
  * This may need to be greater than __NR_last_syscall+1 in order to
  * account for the padding in the syscall table
  */
-#define __NR_syscalls  (388)
+#define __NR_syscalls  (392)
 
 /*
  * *NOTE*: This is a ghost syscall private to the kernel.  Only the
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 0c3f5a0..7a2a32a1 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -414,6 +414,8 @@
 #define __NR_memfd_create		(__NR_SYSCALL_BASE+385)
 #define __NR_bpf			(__NR_SYSCALL_BASE+386)
 #define __NR_execveat			(__NR_SYSCALL_BASE+387)
+#define __NR_userfaultfd		(__NR_SYSCALL_BASE+388)
+#define __NR_membarrier			(__NR_SYSCALL_BASE+389)
 
 /*
  * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 05745eb..fde6c88 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -397,6 +397,8 @@
 /* 385 */	CALL(sys_memfd_create)
 		CALL(sys_bpf)
 		CALL(sys_execveat)
+		CALL(sys_userfaultfd)
+		CALL(sys_membarrier)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
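A hedged user-space check that the newly wired membarrier syscall responds, using the EABI number added above (constants mirror include/uapi/linux/membarrier.h):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_membarrier
#define __NR_membarrier 389		/* EABI: __NR_SYSCALL_BASE is 0 */
#endif
#define MEMBARRIER_CMD_QUERY 0

int main(void)
{
	long mask = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);

	if (mask < 0) {
		perror("membarrier");
		return 1;
	}
	printf("supported membarrier commands: 0x%lx\n", mask);
	return 0;
}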
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 5ff4826..2766183 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -79,26 +79,6 @@
 	handle_IRQ(irq, regs);
 }
 
-void set_irq_flags(unsigned int irq, unsigned int iflags)
-{
-	unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
-
-	if (irq >= nr_irqs) {
-		pr_err("Trying to set irq flags for IRQ%d\n", irq);
-		return;
-	}
-
-	if (iflags & IRQF_VALID)
-		clr |= IRQ_NOREQUEST;
-	if (iflags & IRQF_PROBE)
-		clr |= IRQ_NOPROBE;
-	if (!(iflags & IRQF_NOAUTOEN))
-		clr |= IRQ_NOAUTOEN;
-	/* Order is clear bits in "clr" then set bits in "set" */
-	irq_modify_status(irq, clr, set & ~clr);
-}
-EXPORT_SYMBOL_GPL(set_irq_flags);
-
 void __init init_IRQ(void)
 {
 	int ret;
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a6ad93c..fd9eefc 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -259,15 +259,17 @@
 	if (err)
 		return err;
 
-	patch_text((void *)bpt->bpt_addr,
-		   *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+	/* Machine is already stopped, so we can use __patch_text() directly */
+	__patch_text((void *)bpt->bpt_addr,
+		     *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
 
 	return err;
 }
 
 int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 {
-	patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+	/* Machine is already stopped, so we can use __patch_text() directly */
+	__patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
 
 	return 0;
 }
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index a3089ba..7a7c4ce 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -226,6 +226,7 @@
 
 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
 
+#ifdef CONFIG_CPU_USE_DOMAINS
 	/*
 	 * Copy the initial value of the domain access control register
 	 * from the current thread: thread->addr_limit will have been
@@ -233,6 +234,7 @@
 	 * kernel/fork.c
 	 */
 	thread->cpu_domain = get_domain();
+#endif
 
 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index b6cda06..7b8f214 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -343,15 +343,18 @@
 		 */
 		thumb = handler & 1;
 
-#if __LINUX_ARM_ARCH__ >= 7
 		/*
-		 * Clear the If-Then Thumb-2 execution state
-		 * ARM spec requires this to be all 000s in ARM mode
-		 * Snapdragon S4/Krait misbehaves on a Thumb=>ARM
-		 * signal transition without this.
+		 * Clear the If-Then Thumb-2 execution state.  ARM spec
+		 * requires this to be all 000s in ARM mode.  Snapdragon
+		 * S4/Krait misbehaves on a Thumb=>ARM signal transition
+		 * without this.
+		 *
+		 * We must do this whenever we are running on a Thumb-2
+		 * capable CPU, which includes ARMv6T2.  However, we elect
+		 * to always do this to simplify the code; this field is
+		 * marked UNK/SBZP for older architectures.
 		 */
 		cpsr &= ~PSR_IT_MASK;
-#endif
 
 		if (thumb) {
 			cpsr |= PSR_T_BIT;
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index bfb915d..210ecca 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -45,15 +45,4 @@
 	---help---
 	  Provides host support for ARM processors.
 
-config KVM_ARM_MAX_VCPUS
-	int "Number maximum supported virtual CPUs per VM"
-	depends on KVM_ARM_HOST
-	default 4
-	help
-	  Static number of max supported virtual CPUs per VM.
-
-	  If you choose a high number, the vcpu structures will be quite
-	  large, so only choose a reasonable number that you expect to
-	  actually use.
-
 endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index ce404a5..dc017ad 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -446,7 +446,7 @@
 	 * Map the VGIC hardware resources before running a vcpu the first
 	 * time on this VM.
 	 */
-	if (unlikely(!vgic_ready(kvm))) {
+	if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
 		ret = kvm_vgic_map_resources(kvm);
 		if (ret)
 			return ret;
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 702740d..51a5950 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -515,8 +515,7 @@
 
 	mrc	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
 	str	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
-	bic	r2, #1			@ Clear ENABLE
-	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
+
 	isb
 
 	mrrc	p15, 3, rr_lo_hi(r2, r3), c14	@ CNTV_CVAL
@@ -529,6 +528,9 @@
 	mcrr	p15, 4, r2, r2, c14	@ CNTVOFF
 
 1:
+	mov	r2, #0			@ Clear ENABLE
+	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
+
 	@ Allow physical timer/counter access for the host
 	mrc	p15, 4, r2, c14, c1, 0	@ CNTHCTL
 	orr	r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 7b42012..6984342 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1792,8 +1792,10 @@
 		if (vma->vm_flags & VM_PFNMAP) {
 			gpa_t gpa = mem->guest_phys_addr +
 				    (vm_start - mem->userspace_addr);
-			phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
-					 vm_start - vma->vm_start;
+			phys_addr_t pa;
+
+			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+			pa += vm_start - vma->vm_start;
 
 			/* IO region dirty page logging not allowed */
 			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 4b94b51..ad6f642 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -126,7 +126,7 @@
 
 static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 {
-	int i;
+	int i, matching_cpus = 0;
 	unsigned long mpidr;
 	unsigned long target_affinity;
 	unsigned long target_affinity_mask;
@@ -151,12 +151,16 @@
 	 */
 	kvm_for_each_vcpu(i, tmp, kvm) {
 		mpidr = kvm_vcpu_get_mpidr_aff(tmp);
-		if (((mpidr & target_affinity_mask) == target_affinity) &&
-		    !tmp->arch.pause) {
-			return PSCI_0_2_AFFINITY_LEVEL_ON;
+		if ((mpidr & target_affinity_mask) == target_affinity) {
+			matching_cpus++;
+			if (!tmp->arch.pause)
+				return PSCI_0_2_AFFINITY_LEVEL_ON;
 		}
 	}
 
+	if (!matching_cpus)
+		return PSCI_RET_INVALID_PARAMS;
+
 	return PSCI_0_2_AFFINITY_LEVEL_OFF;
 }
 
diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
index 305d7c6..bfb3703 100644
--- a/arch/arm/mach-dove/irq.c
+++ b/arch/arm/mach-dove/irq.c
@@ -69,14 +69,14 @@
 	.irq_ack	= pmu_irq_ack,
 };
 
-static void pmu_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void pmu_irq_handler(struct irq_desc *desc)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
 	unsigned long cause = readl(PMU_INTERRUPT_CAUSE);
+	unsigned int irq;
 
 	cause &= readl(PMU_INTERRUPT_MASK);
 	if (cause == 0) {
-		do_bad_IRQ(irq, desc);
+		do_bad_IRQ(desc);
 		return;
 	}
 
diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c
index fcd79bc..c01fca1 100644
--- a/arch/arm/mach-footbridge/isa-irq.c
+++ b/arch/arm/mach-footbridge/isa-irq.c
@@ -87,13 +87,12 @@
 	.irq_unmask	= isa_unmask_pic_hi_irq,
 };
 
-static void
-isa_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void isa_irq_handler(struct irq_desc *desc)
 {
 	unsigned int isa_irq = *(unsigned char *)PCIIACK_BASE;
 
 	if (isa_irq < _ISA_IRQ(0) || isa_irq >= _ISA_IRQ(16)) {
-		do_bad_IRQ(isa_irq, desc);
+		do_bad_IRQ(desc);
 		return;
 	}
 
diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c
index 220333e..2478d9f 100644
--- a/arch/arm/mach-gemini/gpio.c
+++ b/arch/arm/mach-gemini/gpio.c
@@ -126,7 +126,7 @@
 	return 0;
 }
 
-static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void gpio_irq_handler(struct irq_desc *desc)
 {
 	unsigned int port = (unsigned int)irq_desc_get_handler_data(desc);
 	unsigned int gpio_irq_no, irq_stat;
diff --git a/arch/arm/mach-imx/3ds_debugboard.c b/arch/arm/mach-imx/3ds_debugboard.c
index 45903be..16496a0 100644
--- a/arch/arm/mach-imx/3ds_debugboard.c
+++ b/arch/arm/mach-imx/3ds_debugboard.c
@@ -85,7 +85,7 @@
 	.resource = smsc911x_resources,
 };
 
-static void mxc_expio_irq_handler(u32 irq, struct irq_desc *desc)
+static void mxc_expio_irq_handler(struct irq_desc *desc)
 {
 	u32 imr_val;
 	u32 int_valid;
diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c
index 2c08535..2b147e4 100644
--- a/arch/arm/mach-imx/mach-mx31ads.c
+++ b/arch/arm/mach-imx/mach-mx31ads.c
@@ -154,7 +154,7 @@
 	imx31_add_imx_uart0(&uart_pdata);
 }
 
-static void mx31ads_expio_irq_handler(u32 irq, struct irq_desc *desc)
+static void mx31ads_expio_irq_handler(struct irq_desc *desc)
 {
 	u32 imr_val;
 	u32 int_valid;
diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c
index 9f89e76..f6235b2 100644
--- a/arch/arm/mach-iop13xx/msi.c
+++ b/arch/arm/mach-iop13xx/msi.c
@@ -91,7 +91,7 @@
 	write_imipr_3,
 };
 
-static void iop13xx_msi_handler(unsigned int irq, struct irq_desc *desc)
+static void iop13xx_msi_handler(struct irq_desc *desc)
 {
 	int i, j;
 	unsigned long status;
diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c
index cce4cef..2ae431e 100644
--- a/arch/arm/mach-lpc32xx/irq.c
+++ b/arch/arm/mach-lpc32xx/irq.c
@@ -370,7 +370,7 @@
 	.irq_set_wake = lpc32xx_irq_wake
 };
 
-static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc)
+static void lpc32xx_sic1_handler(struct irq_desc *desc)
 {
 	unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC1_BASE));
 
@@ -383,7 +383,7 @@
 	}
 }
 
-static void lpc32xx_sic2_handler(unsigned int irq, struct irq_desc *desc)
+static void lpc32xx_sic2_handler(struct irq_desc *desc)
 {
 	unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC2_BASE));
 
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index 6373e2b..842302d 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -69,8 +69,7 @@
 #define DEBUG_IRQ(fmt...)	while (0) {}
 #endif
 
-static void
-netx_hif_demux_handler(unsigned int irq_unused, struct irq_desc *desc)
+static void netx_hif_demux_handler(struct irq_desc *desc)
 {
 	unsigned int irq = NETX_IRQ_HIF_CHAINED(0);
 	unsigned int stat;
diff --git a/arch/arm/mach-omap1/fpga.c b/arch/arm/mach-omap1/fpga.c
index dfec671..39e20d0 100644
--- a/arch/arm/mach-omap1/fpga.c
+++ b/arch/arm/mach-omap1/fpga.c
@@ -87,7 +87,7 @@
 	fpga_ack_irq(d);
 }
 
-static void innovator_fpga_IRQ_demux(unsigned int irq, struct irq_desc *desc)
+static void innovator_fpga_IRQ_demux(struct irq_desc *desc)
 {
 	u32 stat;
 	int fpga_irq;
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 07d2e10..b3a0dff 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -44,10 +44,11 @@
 	select ARM_CPU_SUSPEND if PM
 	select ARM_GIC
 	select HAVE_ARM_SCU if SMP
-	select HAVE_ARM_TWD if SMP
 	select HAVE_ARM_ARCH_TIMER
 	select ARM_ERRATA_798181 if SMP
+	select OMAP_INTERCONNECT
 	select OMAP_INTERCONNECT_BARRIER
+	select PM_OPP if PM
 
 config SOC_AM33XX
 	bool "TI AM33XX"
@@ -70,10 +71,13 @@
 	select ARCH_OMAP2PLUS
 	select ARM_CPU_SUSPEND if PM
 	select ARM_GIC
+	select HAVE_ARM_SCU if SMP
 	select HAVE_ARM_ARCH_TIMER
 	select IRQ_CROSSBAR
 	select ARM_ERRATA_798181 if SMP
+	select OMAP_INTERCONNECT
 	select OMAP_INTERCONNECT_BARRIER
+	select PM_OPP if PM
 
 config ARCH_OMAP2PLUS
 	bool
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 24c9afc..6133eaa 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -20,13 +20,6 @@
 
 #include "common.h"
 
-#if !(defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3))
-#define intc_of_init	NULL
-#endif
-#ifndef CONFIG_ARCH_OMAP4
-#define gic_of_init		NULL
-#endif
-
 static const struct of_device_id omap_dt_match_table[] __initconst = {
 	{ .compatible = "simple-bus", },
 	{ .compatible = "ti,omap-infra", },
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index e3f713f..54a5ba54 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -653,8 +653,12 @@
 			omap_revision = DRA752_REV_ES1_0;
 			break;
 		case 1:
-		default:
 			omap_revision = DRA752_REV_ES1_1;
+			break;
+		case 2:
+		default:
+			omap_revision = DRA752_REV_ES2_0;
+			break;
 		}
 		break;
 
@@ -674,7 +678,7 @@
 		/* Unknown default to latest silicon rev as default*/
 		pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n",
 			__func__, idcode, hawkeye, rev);
-		omap_revision = DRA752_REV_ES1_1;
+		omap_revision = DRA752_REV_ES2_0;
 	}
 
 	sprintf(soc_name, "DRA%03x", omap_rev() >> 16);
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 980c937..3eaeaca 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -676,6 +676,7 @@
 void __init am43xx_init_late(void)
 {
 	omap_common_late_init();
+	omap2_clk_enable_autoidle_all();
 }
 #endif
 
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 4cb8fd9..72ebc4c 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -901,7 +901,8 @@
 		if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE)
 			return 0;
 
-	if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
+	if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER &&
+	    od->_driver_status != BUS_NOTIFY_BIND_DRIVER) {
 		if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
 			dev_warn(dev, "%s: enabled but no driver.  Idling\n",
 				 __func__);
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 425bfcd..b668719 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -103,7 +103,8 @@
 #define PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD	(1 << 0)
 #define PM_OMAP4_CPU_OSWR_DISABLE		(1 << 1)
 
-#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_PM) && (defined(CONFIG_ARCH_OMAP4) ||\
+	   defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX))
 extern u16 pm44xx_errata;
 #define IS_PM44XX_ERRATUM(id)		(pm44xx_errata & (id))
 #else
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 257e98c..3fc2cbe 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -102,7 +102,7 @@
  * dispatched accordingly. Clearing of the wakeup events should be
  * done by the SoC specific individual handlers.
  */
-static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void omap_prcm_irq_handler(struct irq_desc *desc)
 {
 	unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
 	unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index f97654d..2d1d384 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -469,6 +469,7 @@
 #define DRA7XX_CLASS		0x07000000
 #define DRA752_REV_ES1_0	(DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8))
 #define DRA752_REV_ES1_1	(DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8))
+#define DRA752_REV_ES2_0	(DRA7XX_CLASS | (0x52 << 16) | (0x20 << 8))
 #define DRA722_REV_ES1_0	(DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
 
 void omap2xxx_check_revision(void);
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index e4d8701..a556551 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -297,12 +297,8 @@
 	if (IS_ERR(src))
 		return PTR_ERR(src);
 
-	r = clk_set_parent(timer->fclk, src);
-	if (r < 0) {
-		pr_warn("%s: %s cannot set source\n", __func__, oh->name);
-		clk_put(src);
-		return r;
-	}
+	WARN(clk_set_parent(timer->fclk, src) < 0,
+	     "Cannot set timer parent clock, no PLL clock driver?");
 
 	clk_put(src);
 
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index e5a35f6..d44d311 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -300,7 +300,7 @@
 
 	val = voltdm->read(OMAP3_PRM_POLCTRL_OFFSET);
 	if (!(val & OMAP3430_PRM_POLCTRL_CLKREQ_POL) ||
-	    (val & OMAP3430_PRM_POLCTRL_CLKREQ_POL)) {
+	    (val & OMAP3430_PRM_POLCTRL_OFFMODE_POL)) {
 		val |= OMAP3430_PRM_POLCTRL_CLKREQ_POL;
 		val &= ~OMAP3430_PRM_POLCTRL_OFFMODE_POL;
 		pr_debug("PM: fixing sys_clkreq and sys_off_mode polarity to 0x%x\n",
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index 70366b3..a727282 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -496,13 +496,13 @@
 	.irq_unmask	= balloon3_unmask_irq,
 };
 
-static void balloon3_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void balloon3_irq_handler(struct irq_desc *desc)
 {
 	unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) &
 					balloon3_irq_enabled;
 	do {
 		struct irq_data *d = irq_desc_get_irq_data(desc);
-		struct irq_chip *chip = irq_data_get_chip(d);
+		struct irq_chip *chip = irq_desc_get_chip(desc);
 		unsigned int irq;
 
 		/* clear useless edge notification */
diff --git a/arch/arm/mach-pxa/cm-x2xx-pci.c b/arch/arm/mach-pxa/cm-x2xx-pci.c
index 1fa79f1..3221ae1 100644
--- a/arch/arm/mach-pxa/cm-x2xx-pci.c
+++ b/arch/arm/mach-pxa/cm-x2xx-pci.c
@@ -29,13 +29,12 @@
 void __iomem *it8152_base_address;
 static int cmx2xx_it8152_irq_gpio;
 
-static void cmx2xx_it8152_irq_demux(unsigned int __irq, struct irq_desc *desc)
+static void cmx2xx_it8152_irq_demux(struct irq_desc *desc)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
 	/* clear our parent irq */
 	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
-	it8152_irq_demux(irq, desc);
+	it8152_irq_demux(desc);
 }
 
 void __cmx2xx_pci_init_irq(int irq_gpio)
diff --git a/arch/arm/mach-pxa/include/mach/addr-map.h b/arch/arm/mach-pxa/include/mach/addr-map.h
index d28fe29..07b93fd 100644
--- a/arch/arm/mach-pxa/include/mach/addr-map.h
+++ b/arch/arm/mach-pxa/include/mach/addr-map.h
@@ -44,6 +44,13 @@
  */
 
 /*
+ * DFI Bus for NAND, PXA3xx only
+ */
+#define NAND_PHYS		0x43100000
+#define NAND_VIRT		IOMEM(0xf6300000)
+#define NAND_SIZE		0x00100000
+
+/*
  * Internal Memory Controller (PXA27x and later)
  */
 #define IMEMC_PHYS		0x58000000
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index b070167..4823d97 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -120,7 +120,7 @@
 	.irq_unmask	= lpd270_unmask_irq,
 };
 
-static void lpd270_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void lpd270_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq;
 	unsigned long pending;
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 9a0c8af..d8319b5 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -284,7 +284,7 @@
 	.irq_unmask	= pcm990_unmask_irq,
 };
 
-static void pcm990_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void pcm990_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq;
 	unsigned long pending;
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index ce0f8d6..06005d3 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -47,6 +47,13 @@
 #define ISRAM_START	0x5c000000
 #define ISRAM_SIZE	SZ_256K
 
+/*
+ * NAND NFC: DFI bus arbitration subset
+ */
+#define NDCR			(*(volatile u32 __iomem*)(NAND_VIRT + 0))
+#define NDCR_ND_ARB_EN		(1 << 12)
+#define NDCR_ND_ARB_CNTL	(1 << 19)
+
 static void __iomem *sram;
 static unsigned long wakeup_src;
 
@@ -362,7 +369,12 @@
 		.pfn		= __phys_to_pfn(PXA3XX_SMEMC_BASE),
 		.length		= SMEMC_SIZE,
 		.type		= MT_DEVICE
-	}
+	}, {
+		.virtual	= (unsigned long)NAND_VIRT,
+		.pfn		= __phys_to_pfn(NAND_PHYS),
+		.length		= NAND_SIZE,
+		.type		= MT_DEVICE
+	},
 };
 
 void __init pxa3xx_map_io(void)
@@ -419,6 +431,13 @@
 		 */
 		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);
 
+		/*
+		 * Disable DFI bus arbitration, to prevent a system bus lock if
+		 * somebody disables the NAND clock (unused clock) while this
+		 * bit remains set.
+		 */
+		NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;
+
 		if ((ret = pxa_init_dma(IRQ_DMA, 32)))
 			return ret;
 
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 4841d6c..8ab2637 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -276,7 +276,7 @@
 			viper_irq_enabled_mask;
 }
 
-static void viper_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void viper_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq;
 	unsigned long pending;
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 6f94dd7..30e62a3 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -105,7 +105,7 @@
 	return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask;
 }
 
-static void zeus_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void zeus_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq;
 	unsigned long pending;
diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c
index f726d4c..dc67a7f 100644
--- a/arch/arm/mach-rpc/ecard.c
+++ b/arch/arm/mach-rpc/ecard.c
@@ -551,8 +551,7 @@
 	}
 }
 
-static void
-ecard_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ecard_irq_handler(struct irq_desc *desc)
 {
 	ecard_t *ec;
 	int called = 0;
diff --git a/arch/arm/mach-s3c24xx/bast-irq.c b/arch/arm/mach-s3c24xx/bast-irq.c
index ced1ab86..2bb0896 100644
--- a/arch/arm/mach-s3c24xx/bast-irq.c
+++ b/arch/arm/mach-s3c24xx/bast-irq.c
@@ -100,9 +100,7 @@
 	.irq_ack	= bast_pc104_maskack
 };
 
-static void
-bast_irq_pc104_demux(unsigned int irq,
-		     struct irq_desc *desc)
+static void bast_irq_pc104_demux(struct irq_desc *desc)
 {
 	unsigned int stat;
 	unsigned int irqno;
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c
index fd63ecf..ddb30b8 100644
--- a/arch/arm/mach-s3c64xx/common.c
+++ b/arch/arm/mach-s3c64xx/common.c
@@ -388,22 +388,22 @@
 	}
 }
 
-static void s3c_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux_eint0_3(struct irq_desc *desc)
 {
 	s3c_irq_demux_eint(0, 3);
 }
 
-static void s3c_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux_eint4_11(struct irq_desc *desc)
 {
 	s3c_irq_demux_eint(4, 11);
 }
 
-static void s3c_irq_demux_eint12_19(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux_eint12_19(struct irq_desc *desc)
 {
 	s3c_irq_demux_eint(12, 19);
 }
 
-static void s3c_irq_demux_eint20_27(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux_eint20_27(struct irq_desc *desc)
 {
 	s3c_irq_demux_eint(20, 27);
 }
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 6d237b4..8411985 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -166,7 +166,7 @@
  * ensure that the IRQ signal is deasserted before returning.  This
  * is rather unfortunate.
  */
-static void neponset_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void neponset_irq_handler(struct irq_desc *desc)
 {
 	struct neponset_drvdata *d = irq_desc_get_handler_data(desc);
 	unsigned int irr;
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 9769f1e..00b7f7d 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -365,15 +365,21 @@
  user:
 	if (LDST_L_BIT(instr)) {
 		unsigned long val;
+		unsigned int __ua_flags = uaccess_save_and_enable();
+
 		get16t_unaligned_check(val, addr);
+		uaccess_restore(__ua_flags);
 
 		/* signed half-word? */
 		if (instr & 0x40)
 			val = (signed long)((signed short) val);
 
 		regs->uregs[rd] = val;
-	} else
+	} else {
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		put16t_unaligned_check(regs->uregs[rd], addr);
+		uaccess_restore(__ua_flags);
+	}
 
 	return TYPE_LDST;
 
@@ -420,14 +426,21 @@
 
  user:
 	if (load) {
-		unsigned long val;
+		unsigned long val, val2;
+		unsigned int __ua_flags = uaccess_save_and_enable();
+
 		get32t_unaligned_check(val, addr);
+		get32t_unaligned_check(val2, addr + 4);
+
+		uaccess_restore(__ua_flags);
+
 		regs->uregs[rd] = val;
-		get32t_unaligned_check(val, addr + 4);
-		regs->uregs[rd2] = val;
+		regs->uregs[rd2] = val2;
 	} else {
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		put32t_unaligned_check(regs->uregs[rd], addr);
 		put32t_unaligned_check(regs->uregs[rd2], addr + 4);
+		uaccess_restore(__ua_flags);
 	}
 
 	return TYPE_LDST;
@@ -458,10 +471,15 @@
  trans:
 	if (LDST_L_BIT(instr)) {
 		unsigned int val;
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		get32t_unaligned_check(val, addr);
+		uaccess_restore(__ua_flags);
 		regs->uregs[rd] = val;
-	} else
+	} else {
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		put32t_unaligned_check(regs->uregs[rd], addr);
+		uaccess_restore(__ua_flags);
+	}
 	return TYPE_LDST;
 
  fault:
@@ -531,6 +549,7 @@
 #endif
 
 	if (user_mode(regs)) {
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
 		     regbits >>= 1, rd += 1)
 			if (regbits & 1) {
@@ -542,6 +561,7 @@
 					put32t_unaligned_check(regs->uregs[rd], eaddr);
 				eaddr += 4;
 			}
+		uaccess_restore(__ua_flags);
 	} else {
 		for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
 		     regbits >>= 1, rd += 1)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e626043..1a7815e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1249,7 +1249,7 @@
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t dma_addr, iova;
-	int i, ret = DMA_ERROR_CODE;
+	int i;
 
 	dma_addr = __alloc_iova(mapping, size);
 	if (dma_addr == DMA_ERROR_CODE)
@@ -1257,6 +1257,8 @@
 
 	iova = dma_addr;
 	for (i = 0; i < count; ) {
+		int ret;
+
 		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
 		phys_addr_t phys = page_to_phys(pages[i]);
 		unsigned int len, j;
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 876060b..6be4151 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -125,7 +125,7 @@
 }
 
 /*
- * Wrapper that handles both OABI and EABI and assures Thumb2 interworking
+ * Wrappers which handle both OABI and EABI and assures Thumb2 interworking
  * (where the assembly routines like __aeabi_uidiv could cause problems).
  */
 static u32 jit_udiv(u32 dividend, u32 divisor)
@@ -133,6 +133,11 @@
 	return dividend / divisor;
 }
 
+static u32 jit_mod(u32 dividend, u32 divisor)
+{
+	return dividend % divisor;
+}
+
 static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
 {
 	inst |= (cond << 28);
@@ -471,11 +476,17 @@
 #endif
 }
 
-static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
+static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx,
+				int bpf_op)
 {
 #if __LINUX_ARM_ARCH__ == 7
 	if (elf_hwcap & HWCAP_IDIVA) {
-		emit(ARM_UDIV(rd, rm, rn), ctx);
+		if (bpf_op == BPF_DIV)
+			emit(ARM_UDIV(rd, rm, rn), ctx);
+		else {
+			emit(ARM_UDIV(ARM_R3, rm, rn), ctx);
+			emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx);
+		}
 		return;
 	}
 #endif
@@ -496,7 +507,8 @@
 		emit(ARM_MOV_R(ARM_R0, rm), ctx);
 
 	ctx->seen |= SEEN_CALL;
-	emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
+	emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod,
+		   ctx);
 	emit_blx_r(ARM_R3, ctx);
 
 	if (rd != ARM_R0)
@@ -697,13 +709,27 @@
 			if (k == 1)
 				break;
 			emit_mov_i(r_scratch, k, ctx);
-			emit_udiv(r_A, r_A, r_scratch, ctx);
+			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV);
 			break;
 		case BPF_ALU | BPF_DIV | BPF_X:
 			update_on_xread(ctx);
 			emit(ARM_CMP_I(r_X, 0), ctx);
 			emit_err_ret(ARM_COND_EQ, ctx);
-			emit_udiv(r_A, r_A, r_X, ctx);
+			emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV);
+			break;
+		case BPF_ALU | BPF_MOD | BPF_K:
+			if (k == 1) {
+				emit_mov_i(r_A, 0, ctx);
+				break;
+			}
+			emit_mov_i(r_scratch, k, ctx);
+			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD);
+			break;
+		case BPF_ALU | BPF_MOD | BPF_X:
+			update_on_xread(ctx);
+			emit(ARM_CMP_I(r_X, 0), ctx);
+			emit_err_ret(ARM_COND_EQ, ctx);
+			emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD);
 			break;
 		case BPF_ALU | BPF_OR | BPF_K:
 			/* A |= K */
@@ -1047,7 +1073,7 @@
 
 	set_memory_ro((unsigned long)header, header->pages);
 	fp->bpf_func = (void *)ctx.target;
-	fp->jited = true;
+	fp->jited = 1;
 out:
 	kfree(ctx.offsets);
 	return;
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index 4b17d5ab..c46fca2 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -115,6 +115,8 @@
 
 #define ARM_INST_UMULL		0x00800090
 
+#define ARM_INST_MLS		0x00600090
+
 /*
  * Use a suitable undefined instruction to use for ARM/Thumb2 faulting.
  * We need to be careful not to conflict with those used by other modules
@@ -210,4 +212,7 @@
 #define ARM_UMULL(rd_lo, rd_hi, rn, rm)	(ARM_INST_UMULL | (rd_hi) << 16 \
 					 | (rd_lo) << 12 | (rm) << 8 | rn)
 
+#define ARM_MLS(rd, rn, rm, ra)	(ARM_INST_MLS | (rd) << 16 | (rn) | (rm) << 8 \
+				 | (ra) << 12)
+
 #endif /* PFILTER_OPCODES_ARM_H */
diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S
index 71df435..39c20af 100644
--- a/arch/arm/nwfpe/entry.S
+++ b/arch/arm/nwfpe/entry.S
@@ -95,9 +95,10 @@
 	reteq	r4			@ no, return failure
 
 next:
+	uaccess_enable r3
 .Lx1:	ldrt	r6, [r5], #4		@ get the next instruction and
 					@ increment PC
-
+	uaccess_disable r3
 	and	r2, r6, #0x0F000000	@ test for FP insns
 	teq	r2, #0x0C000000
 	teqne	r2, #0x0D000000
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 79c33ec..7bd22d8 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -407,7 +407,7 @@
 	return 0;
 }
 
-static void gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
+static void gpio_irq_handler(struct irq_desc *desc)
 {
 	struct orion_gpio_chip *ochip = irq_desc_get_handler_data(desc);
 	u32 cause, type;
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index ad9529c..daa1a65 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -107,7 +107,6 @@
 	{ .compatible = "mvrl,pxa168-ssp",	.data = (void *) PXA168_SSP },
 	{ .compatible = "mrvl,pxa910-ssp",	.data = (void *) PXA910_SSP },
 	{ .compatible = "mrvl,ce4100-ssp",	.data = (void *) CE4100_SSP },
-	{ .compatible = "mrvl,lpss-ssp",	.data = (void *) LPSS_SSP },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids);
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
index f00e080..10fd99c 100644
--- a/arch/arm/xen/hypercall.S
+++ b/arch/arm/xen/hypercall.S
@@ -98,8 +98,23 @@
 	mov r1, r2
 	mov r2, r3
 	ldr r3, [sp, #8]
+	/*
+	 * Privcmd calls are issued by the userspace. We need to allow the
+	 * kernel to access the userspace memory before issuing the hypercall.
+	 */
+	uaccess_enable r4
+
+	/* r4 is loaded now as we use it as scratch register before */
 	ldr r4, [sp, #4]
 	__HVC(XEN_IMM)
+
+	/*
+	 * Disable userspace access from kernel. This is fine to do it
+	 * unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is
+	 * called before.
+	 */
+	uaccess_disable r4
+
 	ldm sp!, {r4}
 	ret lr
 ENDPROC(privcmd_call);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7d95663..07d1811 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -32,6 +32,7 @@
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select GENERIC_CPU_AUTOPROBE
 	select GENERIC_EARLY_IOREMAP
+	select GENERIC_IDLE_POLL_SETUP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IRQ_SHOW_LEVEL
@@ -331,6 +332,22 @@
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_843419
+	bool "Cortex-A53: 843419: A load or store might access an incorrect address"
+	depends on MODULES
+	default y
+	help
+	  This option builds kernel modules using the large memory model in
+	  order to avoid the use of the ADRP instruction, which can cause
+	  a subsequent memory access to use an incorrect address on Cortex-A53
+	  parts up to r0p4.
+
+	  Note that the kernel itself must be linked with a version of ld
+	  which fixes potentially affected ADRP instructions through the
+	  use of veneers.
+
+	  If unsure, say Y.
+
 endmenu
 
 
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 15ff5b4..f9914d7 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -41,6 +41,10 @@
 
 CHECKFLAGS	+= -D__aarch64__
 
+ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+CFLAGS_MODULE	+= -mcmodel=large
+endif
+
 # Default value
 head-y		:= arch/arm64/kernel/head.o
 
diff --git a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
new file mode 100644
index 0000000..3500586
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
@@ -0,0 +1,193 @@
+soc0: soc@000000000 {
+	#address-cells = <2>;
+	#size-cells = <2>;
+	device_type = "soc";
+	compatible = "simple-bus";
+	ranges = <0x0 0x0 0x0 0x0 0x1 0x0>;
+	chip-id = <0>;
+
+	soc0_mdio0: mdio@803c0000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "hisilicon,hns-mdio";
+		reg = <0x0 0x803c0000 0x0 0x10000
+		       0x0 0x80000000 0x0 0x10000>;
+
+		soc0_phy4: ethernet-phy@4 {
+			reg = <0x0>;
+			device_type = "ethernet-phy";
+			compatible = "ethernet-phy-ieee802.3-c22";
+		};
+		soc0_phy5: ethernet-phy@5 {
+			reg = <0x1>;
+			device_type = "ethernet-phy";
+			compatible = "ethernet-phy-ieee802.3-c22";
+		};
+	};
+
+	dsa: dsa@c7000000 {
+		compatible = "hisilicon,hns-dsaf-v1";
+		dsa_name = "dsaf0";
+		mode = "6port-16rss";
+		interrupt-parent = <&mbigen_dsa>;
+
+		reg = <0x0 0xC0000000 0x0 0x420000
+		       0x0 0xC2000000 0x0 0x300000
+		       0x0 0xc5000000 0x0 0x890000
+		       0x0 0xc7000000 0x0 0x60000
+		       >;
+
+		phy-handle = <0 0 0 0 &soc0_phy4 &soc0_phy5 0 0>;
+		interrupts = <
+			/* [14] ge fifo err 8 / xge 6**/
+			149 0x4 150 0x4 151 0x4 152 0x4
+			153 0x4 154 0x4  26 0x4 27 0x4
+			155 0x4 156 0x4 157 0x4 158 0x4 159 0x4 160 0x4
+			/* [12] rcb com 4*3**/
+			0x6 0x4 0x7 0x4 0x8 0x4 0x9 0x4
+			 16 0x4  17 0x4  18 0x4  19 0x4
+			 22 0x4  23 0x4  24 0x4  25 0x4
+			/* [8] ppe tnl 0-7***/
+			0x0 0x4 0x1 0x4 0x2 0x4 0x3 0x4
+			0x4 0x4 0x5 0x4 12 0x4 13 0x4
+			/* [21] dsaf event int 3+18**/
+			 128 0x4  129 0x4  130 0x4
+			0x83 0x4 0x84 0x4 0x85 0x4 0x86 0x4 0x87 0x4 0x88 0x4
+			0x89 0x4 0x8a 0x4 0x8b 0x4 0x8c 0x4 0x8d 0x4 0x8e 0x4
+			0x8f 0x4 0x90 0x4 0x91 0x4 0x92 0x4 0x93 0x4 0x94 0x4
+			/* [4] debug rcb 2*2*/
+			0xe 0x1 0xf 0x1 0x14 0x1 0x15 0x1
+			/* [256] sevice rcb 2*128*/
+			0x180 0x1 0x181 0x1 0x182 0x1 0x183 0x1
+			0x184 0x1 0x185 0x1 0x186 0x1 0x187 0x1
+			0x188 0x1 0x189 0x1 0x18a 0x1 0x18b 0x1
+			0x18c 0x1 0x18d 0x1 0x18e 0x1 0x18f 0x1
+			0x190 0x1 0x191 0x1 0x192 0x1 0x193 0x1
+			0x194 0x1 0x195 0x1 0x196 0x1 0x197 0x1
+			0x198 0x1 0x199 0x1 0x19a 0x1 0x19b 0x1
+			0x19c 0x1 0x19d 0x1 0x19e 0x1 0x19f 0x1
+			0x1a0 0x1 0x1a1 0x1 0x1a2 0x1 0x1a3 0x1
+			0x1a4 0x1 0x1a5 0x1 0x1a6 0x1 0x1a7 0x1
+			0x1a8 0x1 0x1a9 0x1 0x1aa 0x1 0x1ab 0x1
+			0x1ac 0x1 0x1ad 0x1 0x1ae 0x1 0x1af 0x1
+			0x1b0 0x1 0x1b1 0x1 0x1b2 0x1 0x1b3 0x1
+			0x1b4 0x1 0x1b5 0x1 0x1b6 0x1 0x1b7 0x1
+			0x1b8 0x1 0x1b9 0x1 0x1ba 0x1 0x1bb 0x1
+			0x1bc 0x1 0x1bd 0x1 0x1be 0x1 0x1bf 0x1
+			0x1c0 0x1 0x1c1 0x1 0x1c2 0x1 0x1c3 0x1
+			0x1c4 0x1 0x1c5 0x1 0x1c6 0x1 0x1c7 0x1
+			0x1c8 0x1 0x1c9 0x1 0x1ca 0x1 0x1cb 0x1
+			0x1cc 0x1 0x1cd 0x1 0x1ce 0x1 0x1cf 0x1
+			0x1d0 0x1 0x1d1 0x1 0x1d2 0x1 0x1d3 0x1
+			0x1d4 0x1 0x1d5 0x1 0x1d6 0x1 0x1d7 0x1
+			0x1d8 0x1 0x1d9 0x1 0x1da 0x1 0x1db 0x1
+			0x1dc 0x1 0x1dd 0x1 0x1de 0x1 0x1df 0x1
+			0x1e0 0x1 0x1e1 0x1 0x1e2 0x1 0x1e3 0x1
+			0x1e4 0x1 0x1e5 0x1 0x1e6 0x1 0x1e7 0x1
+			0x1e8 0x1 0x1e9 0x1 0x1ea 0x1 0x1eb 0x1
+			0x1ec 0x1 0x1ed 0x1 0x1ee 0x1 0x1ef 0x1
+			0x1f0 0x1 0x1f1 0x1 0x1f2 0x1 0x1f3 0x1
+			0x1f4 0x1 0x1f5 0x1 0x1f6 0x1 0x1f7 0x1
+			0x1f8 0x1 0x1f9 0x1 0x1fa 0x1 0x1fb 0x1
+			0x1fc 0x1 0x1fd 0x1 0x1fe 0x1 0x1ff 0x1
+			0x200 0x1 0x201 0x1 0x202 0x1 0x203 0x1
+			0x204 0x1 0x205 0x1 0x206 0x1 0x207 0x1
+			0x208 0x1 0x209 0x1 0x20a 0x1 0x20b 0x1
+			0x20c 0x1 0x20d 0x1 0x20e 0x1 0x20f 0x1
+			0x210 0x1 0x211 0x1 0x212 0x1 0x213 0x1
+			0x214 0x1 0x215 0x1 0x216 0x1 0x217 0x1
+			0x218 0x1 0x219 0x1 0x21a 0x1 0x21b 0x1
+			0x21c 0x1 0x21d 0x1 0x21e 0x1 0x21f 0x1
+			0x220 0x1 0x221 0x1 0x222 0x1 0x223 0x1
+			0x224 0x1 0x225 0x1 0x226 0x1 0x227 0x1
+			0x228 0x1 0x229 0x1 0x22a 0x1 0x22b 0x1
+			0x22c 0x1 0x22d 0x1 0x22e 0x1 0x22f 0x1
+			0x230 0x1 0x231 0x1 0x232 0x1 0x233 0x1
+			0x234 0x1 0x235 0x1 0x236 0x1 0x237 0x1
+			0x238 0x1 0x239 0x1 0x23a 0x1 0x23b 0x1
+			0x23c 0x1 0x23d 0x1 0x23e 0x1 0x23f 0x1
+			0x240 0x1 0x241 0x1 0x242 0x1 0x243 0x1
+			0x244 0x1 0x245 0x1 0x246 0x1 0x247 0x1
+			0x248 0x1 0x249 0x1 0x24a 0x1 0x24b 0x1
+			0x24c 0x1 0x24d 0x1 0x24e 0x1 0x24f 0x1
+			0x250 0x1 0x251 0x1 0x252 0x1 0x253 0x1
+			0x254 0x1 0x255 0x1 0x256 0x1 0x257 0x1
+			0x258 0x1 0x259 0x1 0x25a 0x1 0x25b 0x1
+			0x25c 0x1 0x25d 0x1 0x25e 0x1 0x25f 0x1
+			0x260 0x1 0x261 0x1 0x262 0x1 0x263 0x1
+			0x264 0x1 0x265 0x1 0x266 0x1 0x267 0x1
+			0x268 0x1 0x269 0x1 0x26a 0x1 0x26b 0x1
+			0x26c 0x1 0x26d 0x1 0x26e 0x1 0x26f 0x1
+			0x270 0x1 0x271 0x1 0x272 0x1 0x273 0x1
+			0x274 0x1 0x275 0x1 0x276 0x1 0x277 0x1
+			0x278 0x1 0x279 0x1 0x27a 0x1 0x27b 0x1
+			0x27c 0x1 0x27d 0x1 0x27e 0x1 0x27f 0x1>;
+		buf-size = <4096>;
+		desc-num = <1024>;
+		dma-coherent;
+	};
+
+	eth0: ethernet@0{
+		compatible = "hisilicon,hns-nic-v1";
+		ae-name = "dsaf0";
+		port-id = <0>;
+		local-mac-address = [00 00 00 01 00 58];
+		status = "disabled";
+		dma-coherent;
+	};
+	eth1: ethernet@1{
+		compatible = "hisilicon,hns-nic-v1";
+		ae-name = "dsaf0";
+		port-id = <1>;
+		status = "disabled";
+		dma-coherent;
+	};
+	eth2: ethernet@2{
+		compatible = "hisilicon,hns-nic-v1";
+		ae-name = "dsaf0";
+		port-id = <2>;
+		local-mac-address = [00 00 00 01 00 5a];
+		status = "disabled";
+		dma-coherent;
+	};
+	eth3: ethernet@3{
+		compatible = "hisilicon,hns-nic-v1";
+		ae-name = "dsaf0";
+		port-id = <3>;
+		local-mac-address = [00 00 00 01 00 5b];
+		status = "disabled";
+		dma-coherent;
+	};
+	eth4: ethernet@4{
+		compatible = "hisilicon,hns-nic-v1";
+		ae-name = "dsaf0";
+		port-id = <4>;
+		local-mac-address = [00 00 00 01 00 5c];
+		status = "disabled";
+		dma-coherent;
+	};
+	eth5: ethernet@5{
+		compatible = "hisilicon,hns-nic-v1";
+		ae-name = "dsaf0";
+		port-id = <5>;
+		local-mac-address = [00 00 00 01 00 5d];
+		status = "disabled";
+		dma-coherent;
+	};
+	eth6: ethernet@6{
+		compatible = "hisilicon,hns-nic-v1";
+		ae-name = "dsaf0";
+		port-id = <6>;
+		local-mac-address = [00 00 00 01 00 5e];
+		status = "disabled";
+		dma-coherent;
+	};
+	eth7: ethernet@7{
+		compatible = "hisilicon,hns-nic-v1";
+		ae-name = "dsaf0";
+		port-id = <7>;
+		local-mac-address = [00 00 00 01 00 5f];
+		status = "disabled";
+		dma-coherent;
+	};
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index d18ee42..06a1564 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -81,7 +81,7 @@
 		};
 
 		idle-states {
-			entry-method = "arm,psci";
+			entry-method = "psci";
 
 			CPU_SLEEP_0: cpu-sleep-0 {
 				compatible = "arm,idle-state";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index a712bea..cc093a4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -106,7 +106,7 @@
 		};
 
 		idle-states {
-			entry-method = "arm,psci";
+			entry-method = "psci";
 
 			cpu_sleep: cpu-sleep-0 {
 				compatible = "arm,idle-state";
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 2bb7009..a57601f 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -43,9 +43,4 @@
 	irq_err_count++;
 }
 
-/*
- * No arch-specific IRQ flags.
- */
-#define set_irq_flags(irq, flags)
-
 #endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 7605e09..9694f26 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -95,6 +95,7 @@
 			 SCTLR_EL2_SA | SCTLR_EL2_I)
 
 /* TCR_EL2 Registers bits */
+#define TCR_EL2_RES1	((1 << 31) | (1 << 23))
 #define TCR_EL2_TBI	(1 << 20)
 #define TCR_EL2_PS	(7 << 16)
 #define TCR_EL2_PS_40B	(2 << 16)
@@ -106,9 +107,10 @@
 #define TCR_EL2_MASK	(TCR_EL2_TG0 | TCR_EL2_SH0 | \
 			 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
 
-#define TCR_EL2_FLAGS	(TCR_EL2_PS_40B)
+#define TCR_EL2_FLAGS	(TCR_EL2_RES1 | TCR_EL2_PS_40B)
 
 /* VTCR_EL2 Registers bits */
+#define VTCR_EL2_RES1		(1 << 31)
 #define VTCR_EL2_PS_MASK	(7 << 16)
 #define VTCR_EL2_TG0_MASK	(1 << 14)
 #define VTCR_EL2_TG0_4K		(0 << 14)
@@ -147,7 +149,8 @@
  */
 #define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
 				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
+				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
+				 VTCR_EL2_RES1)
 #define VTTBR_X		(38 - VTCR_EL2_T0SZ_40B)
 #else
 /*
@@ -158,7 +161,8 @@
  */
 #define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
 				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
+				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
+				 VTCR_EL2_RES1)
 #define VTTBR_X		(37 - VTCR_EL2_T0SZ_40B)
 #endif
 
@@ -168,7 +172,6 @@
 #define VTTBR_VMID_MASK	  (UL(0xFF) << VTTBR_VMID_SHIFT)
 
 /* Hyp System Trap Register */
-#define HSTR_EL2_TTEE	(1 << 16)
 #define HSTR_EL2_T(x)	(1 << x)
 
 /* Hyp Coproccessor Trap Register Shifts */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 67fa0de..5e37710 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -53,9 +53,7 @@
 #define	IFSR32_EL2	25	/* Instruction Fault Status Register */
 #define	FPEXC32_EL2	26	/* Floating-Point Exception Control Register */
 #define	DBGVCR32_EL2	27	/* Debug Vector Catch Register */
-#define	TEECR32_EL1	28	/* ThumbEE Configuration Register */
-#define	TEEHBR32_EL1	29	/* ThumbEE Handler Base Register */
-#define	NR_SYS_REGS	30
+#define	NR_SYS_REGS	28
 
 /* 32bit mapping */
 #define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 415938d..ed03968 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,19 +30,16 @@
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
-#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
-#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
-#else
-#define KVM_MAX_VCPUS 0
-#endif
-
 #define KVM_USER_MEM_SLOTS 32
 #define KVM_PRIVATE_MEM_SLOTS 4
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_HALT_POLL_NS_DEFAULT 500000
 
 #include <kvm/arm_vgic.h>
 #include <kvm/arm_arch_timer.h>
 
+#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
+
 #define KVM_VCPU_MAX_FEATURES 3
 
 int __attribute_const__ kvm_target_cpu(void);
@@ -195,6 +192,7 @@
 
 struct kvm_vcpu_stat {
 	u32 halt_successful_poll;
+	u32 halt_attempted_poll;
 	u32 halt_wakeup;
 };
 
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 6900b2d9..b0329be 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -26,13 +26,9 @@
  * Software defined PTE bits definition.
  */
 #define PTE_VALID		(_AT(pteval_t, 1) << 0)
+#define PTE_WRITE		(PTE_DBM)		 /* same as DBM (51) */
 #define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
-#ifdef CONFIG_ARM64_HW_AFDBM
-#define PTE_WRITE		(PTE_DBM)		 /* same as DBM */
-#else
-#define PTE_WRITE		(_AT(pteval_t, 1) << 57)
-#endif
 #define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
 
 /*
@@ -146,7 +142,7 @@
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
 #ifdef CONFIG_ARM64_HW_AFDBM
-#define pte_hw_dirty(pte)	(!(pte_val(pte) & PTE_RDONLY))
+#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
 #else
 #define pte_hw_dirty(pte)	(0)
 #endif
@@ -238,7 +234,7 @@
  * When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via
  * the page fault mechanism. Checking the dirty status of a pte becomes:
  *
- *   PTE_DIRTY || !PTE_RDONLY
+ *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
  */
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
@@ -503,7 +499,7 @@
 			      PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
 	/* preserve the hardware dirty information */
 	if (pte_hw_dirty(pte))
-		newprot |= PTE_DIRTY;
+		pte = pte_mkdirty(pte);
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 9b3b62a..cebf786 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -134,7 +134,7 @@
 				    unsigned long action, void *data)
 {
 	int cpu = (unsigned long)data;
-	if (action == CPU_ONLINE)
+	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
 		smp_call_function_single(cpu, clear_os_lock, NULL, 1);
 	return NOTIFY_OK;
 }
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a055be6..90d09ed 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -523,6 +523,11 @@
 	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
 #endif
 
+	/* EL2 debug */
+	mrs	x0, pmcr_el0			// Disable debug access traps
+	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
+	msr	mdcr_el2, x0			// all PMU counters from EL1
+
 	/* Stage-2 translation */
 	msr	vttbr_el2, xzr
 
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index c97040e..bba85c8 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -872,7 +872,7 @@
 						void *hcpu)
 {
 	int cpu = (long)hcpu;
-	if (action == CPU_ONLINE)
+	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
 		smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
 	return NOTIFY_OK;
 }
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 67bf410..876eb8d 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -332,12 +332,14 @@
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
 					     AARCH64_INSN_IMM_ADR);
 			break;
+#ifndef CONFIG_ARM64_ERRATUM_843419
 		case R_AARCH64_ADR_PREL_PG_HI21_NC:
 			overflow_check = false;
 		case R_AARCH64_ADR_PREL_PG_HI21:
 			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
 					     AARCH64_INSN_IMM_ADR);
 			break;
+#endif
 		case R_AARCH64_ADD_ABS_LO12_NC:
 		case R_AARCH64_LDST8_ABS_LO12_NC:
 			overflow_check = false;
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 948f0ad..71ef6dc 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -212,14 +212,32 @@
 
 /*
  * VFP save/restore code.
+ *
+ * We have to be careful with endianness, since the fpsimd context-switch
+ * code operates on 128-bit (Q) register values whereas the compat ABI
+ * uses an array of 64-bit (D) registers. Consequently, we need to swap
+ * the two halves of each Q register when running on a big-endian CPU.
  */
+union __fpsimd_vreg {
+	__uint128_t	raw;
+	struct {
+#ifdef __AARCH64EB__
+		u64	hi;
+		u64	lo;
+#else
+		u64	lo;
+		u64	hi;
+#endif
+	};
+};
+
 static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
 {
 	struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
 	compat_ulong_t magic = VFP_MAGIC;
 	compat_ulong_t size = VFP_STORAGE_SIZE;
 	compat_ulong_t fpscr, fpexc;
-	int err = 0;
+	int i, err = 0;
 
 	/*
 	 * Save the hardware registers to the fpsimd_state structure.
@@ -235,10 +253,15 @@
 	/*
 	 * Now copy the FP registers. Since the registers are packed,
 	 * we can copy the prefix we want (V0-V15) as it is.
-	 * FIXME: Won't work if big endian.
 	 */
-	err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
-			      sizeof(frame->ufp.fpregs));
+	for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+		union __fpsimd_vreg vreg = {
+			.raw = fpsimd->vregs[i >> 1],
+		};
+
+		__put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+		__put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+	}
 
 	/* Create an AArch32 fpscr from the fpsr and the fpcr. */
 	fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
@@ -263,7 +286,7 @@
 	compat_ulong_t magic = VFP_MAGIC;
 	compat_ulong_t size = VFP_STORAGE_SIZE;
 	compat_ulong_t fpscr;
-	int err = 0;
+	int i, err = 0;
 
 	__get_user_error(magic, &frame->magic, err);
 	__get_user_error(size, &frame->size, err);
@@ -273,12 +296,14 @@
 	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
 		return -EINVAL;
 
-	/*
-	 * Copy the FP registers into the start of the fpsimd_state.
-	 * FIXME: Won't work if big endian.
-	 */
-	err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
-				sizeof(frame->ufp.fpregs));
+	/* Copy the FP registers into the start of the fpsimd_state. */
+	for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+		union __fpsimd_vreg vreg;
+
+		__get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+		__get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+		fpsimd.vregs[i >> 1] = vreg.raw;
+	}
 
 	/* Extract the fpsr and the fpcr from the fpscr */
 	__get_user_error(fpscr, &frame->ufp.fpscr, err);
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index bfffe8f..5c7e920e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -41,15 +41,4 @@
 	---help---
 	  Provides host support for ARM processors.
 
-config KVM_ARM_MAX_VCPUS
-	int "Number maximum supported virtual CPUs per VM"
-	depends on KVM_ARM_HOST
-	default 4
-	help
-	  Static number of max supported virtual CPUs per VM.
-
-	  If you choose a high number, the vcpu structures will be quite
-	  large, so only choose a reasonable number that you expect to
-	  actually use.
-
 endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 37c89ea..e583613 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -433,20 +433,13 @@
 	mrs	x5, ifsr32_el2
 	stp	x4, x5, [x3]
 
-	skip_fpsimd_state x8, 3f
+	skip_fpsimd_state x8, 2f
 	mrs	x6, fpexc32_el2
 	str	x6, [x3, #16]
-3:
-	skip_debug_state x8, 2f
+2:
+	skip_debug_state x8, 1f
 	mrs	x7, dbgvcr32_el2
 	str	x7, [x3, #24]
-2:
-	skip_tee_state x8, 1f
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
-	mrs	x4, teecr32_el1
-	mrs	x5, teehbr32_el1
-	stp	x4, x5, [x3]
 1:
 .endm
 
@@ -466,16 +459,9 @@
 	msr	dacr32_el2, x4
 	msr	ifsr32_el2, x5
 
-	skip_debug_state x8, 2f
+	skip_debug_state x8, 1f
 	ldr	x7, [x3, #24]
 	msr	dbgvcr32_el2, x7
-2:
-	skip_tee_state x8, 1f
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
-	ldp	x4, x5, [x3]
-	msr	teecr32_el1, x4
-	msr	teehbr32_el1, x5
 1:
 .endm
 
@@ -570,8 +556,6 @@
 	mrs	x3, cntv_ctl_el0
 	and	x3, x3, #3
 	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
-	bic	x3, x3, #1		// Clear Enable
-	msr	cntv_ctl_el0, x3
 
 	isb
 
@@ -579,6 +563,9 @@
 	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]
 
 1:
+	// Disable the virtual timer
+	msr	cntv_ctl_el0, xzr
+
 	// Allow physical timer/counter access for the host
 	mrs	x2, cnthctl_el2
 	orr	x2, x2, #3
@@ -753,6 +740,9 @@
 	// Guest context
 	add	x2, x0, #VCPU_CONTEXT
 
+	// We must restore the 32-bit state before the sysregs, thanks
+	// to Cortex-A57 erratum #852523.
+	restore_guest_32bit_state
 	bl __restore_sysregs
 
 	skip_debug_state x3, 1f
@@ -760,7 +750,6 @@
 	kern_hyp_va x3
 	bl	__restore_debug
 1:
-	restore_guest_32bit_state
 	restore_guest_regs
 
 	// That's it, no more messing around.
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index b41607d..d03d3af 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -272,7 +272,7 @@
 {
 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 
-	if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
 	return 0;
 }
@@ -314,7 +314,7 @@
 {
 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 
-	if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
 
 	return 0;
@@ -358,7 +358,7 @@
 {
 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 
-	if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
 	return 0;
 }
@@ -400,7 +400,7 @@
 {
 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 
-	if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
 	return 0;
 }
@@ -539,13 +539,6 @@
 	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
 	  trap_dbgauthstatus_el1 },
 
-	/* TEECR32_EL1 */
-	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
-	  NULL, reset_val, TEECR32_EL1, 0 },
-	/* TEEHBR32_EL1 */
-	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
-	  NULL, reset_val, TEEHBR32_EL1, 0 },
-
 	/* MDCCSR_EL1 */
 	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
 	  trap_raz_wi },
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 0bcc4bc..99224dc 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -100,7 +100,7 @@
 	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
 	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
 		flags |= GFP_DMA;
-	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
+	if (dev_get_cma_area(dev) && (flags & __GFP_WAIT)) {
 		struct page *page;
 		void *addr;
 
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index c047598..a44e529 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -744,7 +744,7 @@
 
 	set_memory_ro((unsigned long)header, header->pages);
 	prog->bpf_func = (void *)ctx.image;
-	prog->jited = true;
+	prog->jited = 1;
 out:
 	kfree(ctx.offset);
 }
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index d51ff8f..96cabad 100644
--- a/arch/avr32/mach-at32ap/extint.c
+++ b/arch/avr32/mach-at32ap/extint.c
@@ -144,7 +144,7 @@
 	.irq_set_type	= eic_set_irq_type,
 };
 
-static void demux_eic_irq(unsigned int irq, struct irq_desc *desc)
+static void demux_eic_irq(struct irq_desc *desc)
 {
 	struct eic *eic = irq_desc_get_handler_data(desc);
 	unsigned long status, pending;
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index 157a5e0..4f61378 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -281,7 +281,7 @@
 	.irq_set_type	= gpio_irq_type,
 };
 
-static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void gpio_irq_handler(struct irq_desc *desc)
 {
 	struct pio_device	*pio = irq_desc_get_chip_data(desc);
 	unsigned		gpio_irq;
diff --git a/arch/blackfin/include/asm/irq_handler.h b/arch/blackfin/include/asm/irq_handler.h
index 4b2a992..d2f90c7 100644
--- a/arch/blackfin/include/asm/irq_handler.h
+++ b/arch/blackfin/include/asm/irq_handler.h
@@ -60,7 +60,7 @@
 extern void bfin_internal_unmask_irq(unsigned int irq);
 
 struct irq_desc;
-extern void bfin_demux_mac_status_irq(unsigned int, struct irq_desc *);
-extern void bfin_demux_gpio_irq(unsigned int, struct irq_desc *);
+extern void bfin_demux_mac_status_irq(struct irq_desc *);
+extern void bfin_demux_gpio_irq(struct irq_desc *);
 
 #endif
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 0ba2576..052cde5 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -107,7 +107,7 @@
 	 * than crashing, do something sensible.
 	 */
 	if (irq >= NR_IRQS)
-		handle_bad_irq(irq, &bad_irq_desc);
+		handle_bad_irq(&bad_irq_desc);
 	else
 		generic_handle_irq(irq);
 
diff --git a/arch/blackfin/mach-bf537/ints-priority.c b/arch/blackfin/mach-bf537/ints-priority.c
index 14b2f74..a48baae 100644
--- a/arch/blackfin/mach-bf537/ints-priority.c
+++ b/arch/blackfin/mach-bf537/ints-priority.c
@@ -89,8 +89,7 @@
 	.irq_unmask = bf537_generic_error_unmask_irq,
 };
 
-static void bf537_demux_error_irq(unsigned int int_err_irq,
-				  struct irq_desc *inta_desc)
+static void bf537_demux_error_irq(struct irq_desc *inta_desc)
 {
 	int irq = 0;
 
@@ -182,15 +181,12 @@
 	.irq_unmask = bf537_mac_rx_unmask_irq,
 };
 
-static void bf537_demux_mac_rx_irq(unsigned int __int_irq,
-				   struct irq_desc *desc)
+static void bf537_demux_mac_rx_irq(struct irq_desc *desc)
 {
-	unsigned int int_irq = irq_desc_get_irq(desc);
-
 	if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR))
 		bfin_handle_irq(IRQ_MAC_RX);
 	else
-		bfin_demux_gpio_irq(int_irq, desc);
+		bfin_demux_gpio_irq(desc);
 }
 #endif
 
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index a6d1b03..e8d4d74 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -656,8 +656,7 @@
 	.irq_set_wake = bfin_mac_status_set_wake,
 };
 
-void bfin_demux_mac_status_irq(unsigned int int_err_irq,
-			       struct irq_desc *inta_desc)
+void bfin_demux_mac_status_irq(struct irq_desc *inta_desc)
 {
 	int i, irq = 0;
 	u32 status = bfin_read_EMAC_SYSTAT();
@@ -825,7 +824,7 @@
 	}
 }
 
-void bfin_demux_gpio_irq(unsigned int __inta_irq, struct irq_desc *desc)
+void bfin_demux_gpio_irq(struct irq_desc *desc)
 {
 	unsigned int inta_irq = irq_desc_get_irq(desc);
 	unsigned int irq;
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
index d487698..ddcb45d 100644
--- a/arch/c6x/platforms/megamod-pic.c
+++ b/arch/c6x/platforms/megamod-pic.c
@@ -93,7 +93,7 @@
 	.irq_unmask	= unmask_megamod,
 };
 
-static void megamod_irq_cascade(unsigned int __irq, struct irq_desc *desc)
+static void megamod_irq_cascade(struct irq_desc *desc)
 {
 	struct megamod_cascade_data *cascade;
 	struct megamod_pic *pic;
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 0314e32..8da5653 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -36,6 +36,17 @@
 	int
 	default 6
 
+config TRACE_IRQFLAGS_SUPPORT
+	depends on ETRAX_ARCH_V32
+	def_bool y
+
+config STACKTRACE_SUPPORT
+	def_bool y
+
+config LOCKDEP_SUPPORT
+	depends on ETRAX_ARCH_V32
+	def_bool y
+
 config CRIS
 	bool
 	default y
@@ -58,6 +69,7 @@
 	select CLKSRC_MMIO if ETRAX_ARCH_V32
 	select GENERIC_CLOCKEVENTS if ETRAX_ARCH_V32
 	select GENERIC_SCHED_CLOCK if ETRAX_ARCH_V32
+	select HAVE_DEBUG_BUGVERBOSE if ETRAX_ARCH_V32
 
 config HZ
 	int
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index 81570fc..b562252 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -955,6 +955,14 @@
 	.long sys_process_vm_writev
 	.long sys_kcmp			/* 350 */
 	.long sys_finit_module
+	.long sys_sched_setattr
+	.long sys_sched_getattr
+	.long sys_renameat2
+	.long sys_seccomp		/* 355 */
+	.long sys_getrandom
+	.long sys_memfd_create
+	.long sys_bpf
+	.long sys_execveat
 
         /*
          * NOTE!! This doesn't have to be exact - we just have
diff --git a/arch/cris/arch-v10/lib/dmacopy.c b/arch/cris/arch-v10/lib/dmacopy.c
deleted file mode 100644
index 49f5b8c..0000000
--- a/arch/cris/arch-v10/lib/dmacopy.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * memcpy for large blocks, using memory-memory DMA channels 6 and 7 in Etrax
- */
-
-#include <asm/svinto.h>
-#include <asm/io.h>
-
-#define D(x)
-
-void *dma_memcpy(void *pdst,
-		 const void *psrc,
-		 unsigned int pn)
-{
-	static etrax_dma_descr indma, outdma;
-
-	D(printk(KERN_DEBUG "dma_memcpy %d bytes... ", pn));
-
-#if 0
-	*R_GEN_CONFIG = genconfig_shadow =
-		(genconfig_shadow & ~0x3c0000) |
-		IO_STATE(R_GEN_CONFIG, dma6, intdma7) |
-		IO_STATE(R_GEN_CONFIG, dma7, intdma6);
-#endif
-	indma.sw_len = outdma.sw_len = pn;
-	indma.ctrl = d_eol | d_eop;
-	outdma.ctrl = d_eol;
-	indma.buf = psrc;
-	outdma.buf = pdst;
-
-	*R_DMA_CH6_FIRST = &indma;
-	*R_DMA_CH7_FIRST = &outdma;
-	*R_DMA_CH6_CMD = IO_STATE(R_DMA_CH6_CMD, cmd, start);
-	*R_DMA_CH7_CMD = IO_STATE(R_DMA_CH7_CMD, cmd, start);
-
-	while (*R_DMA_CH7_CMD == 1)
-		/* wait for completion */;
-
-	D(printk(KERN_DEBUG "done\n"));
-}
-
-
-
diff --git a/arch/cris/arch-v10/lib/old_checksum.c b/arch/cris/arch-v10/lib/old_checksum.c
deleted file mode 100644
index 8f79163..0000000
--- a/arch/cris/arch-v10/lib/old_checksum.c
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * INET		An implementation of the TCP/IP protocol suite for the LINUX
- *		operating system.  INET is implemented using the  BSD Socket
- *		interface as the means of communication with the user level.
- *
- *		IP/TCP/UDP checksumming routines
- *
- * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
- *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- *		Tom May, <ftom@netcom.com>
- *		Lots of code moved from tcp.c and ip.c; see those files
- *		for more names.
- *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		as published by the Free Software Foundation; either version
- *		2 of the License, or (at your option) any later version.
- */
-
-#include <net/checksum.h>
-#include <net/module.h>
-
-#undef PROFILE_CHECKSUM
-
-#ifdef PROFILE_CHECKSUM
-/* these are just for profiling the checksum code with an oscillioscope.. uh */
-#if 0
-#define BITOFF *((unsigned char *)0xb0000030) = 0xff
-#define BITON *((unsigned char *)0xb0000030) = 0x0
-#endif
-#include <asm/io.h>
-#define CBITON LED_ACTIVE_SET(1)
-#define CBITOFF LED_ACTIVE_SET(0)
-#define BITOFF
-#define BITON
-#else
-#define BITOFF
-#define BITON
-#define CBITOFF
-#define CBITON
-#endif
-
-/*
- * computes a partial checksum, e.g. for TCP/UDP fragments
- */
-
-#include <asm/delay.h>
-
-__wsum csum_partial(const void *p, int len, __wsum __sum)
-{
-	u32 sum = (__force u32)__sum;
-	const u16 *buff = p;
-	/*
-	* Experiments with ethernet and slip connections show that buff
-	* is aligned on either a 2-byte or 4-byte boundary.
-	*/
-	const void *endMarker = p + len;
-	const void *marker = endMarker - (len % 16);
-#if 0
-	if((int)buff & 0x3)
-		printk("unaligned buff %p\n", buff);
-	__delay(900); /* extra delay of 90 us to test performance hit */
-#endif
-	BITON;
-	while (buff < marker) {
-		sum += *buff++;
-		sum += *buff++;
-		sum += *buff++;
-		sum += *buff++;
-		sum += *buff++;
-		sum += *buff++;
-		sum += *buff++;
-		sum += *buff++;
-	}
-	marker = endMarker - (len % 2);
-	while (buff < marker)
-		sum += *buff++;
-
-	if (endMarker > buff)
-		sum += *(const u8 *)buff;	/* add extra byte separately */
-
-	BITOFF;
-	return (__force __wsum)sum;
-}
-
-EXPORT_SYMBOL(csum_partial);
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 4fc16b4..e6c523c 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -202,7 +202,7 @@
 	default "0x00" if ETRAXFS
 	default "0x00000000" if !ETRAXFS
 	help
-	  This is a bitmask (8 bits) with information of what bits in PA that a
+	  This is a bitmask with information of what bits in PA that a
 	  user can change direction on using ioctl's.
 	  Bit set = changeable.
 	  You probably want 0 here, but it depends on your hardware.
@@ -213,7 +213,7 @@
 	default "0x00" if ETRAXFS
 	default "0x00000000" if !ETRAXFS
 	help
-	  This is a bitmask (8 bits) with information of what bits in PA
+	  This is a bitmask with information of what bits in PA
 	  that a user can change the value on using ioctl's.
 	  Bit set = changeable.
 
@@ -223,7 +223,7 @@
 	default "0x00000" if ETRAXFS
 	default "0x00000000" if !ETRAXFS
 	help
-	  This is a bitmask (18 bits) with information of what bits in PB
+	  This is a bitmask with information of what bits in PB
 	  that a user can change direction on using ioctl's.
 	  Bit set = changeable.
 	  You probably want 0 here, but it depends on your hardware.
@@ -234,7 +234,7 @@
 	default "0x00000" if ETRAXFS
 	default "0x00000000" if !ETRAXFS
 	help
-	  This is a bitmask (18 bits) with information of what bits in PB
+	  This is a bitmask with information of what bits in PB
 	  that a user can change the value on using ioctl's.
 	  Bit set = changeable.
 
@@ -244,7 +244,7 @@
 	default "0x00000" if ETRAXFS
 	default "0x00000000" if !ETRAXFS
 	help
-	  This is a bitmask (18 bits) with information of what bits in PC
+	  This is a bitmask with information of what bits in PC
 	  that a user can change direction on using ioctl's.
 	  Bit set = changeable.
 	  You probably want 0 here, but it depends on your hardware.
@@ -253,9 +253,9 @@
 	hex "PC user changeable bits mask"
 	depends on ETRAX_GPIO
 	default "0x00000" if ETRAXFS
-	default "0x00000000" if ETRAXFS
+	default "0x00000000" if !ETRAXFS
 	help
-	  This is a bitmask (18 bits) with information of what bits in PC
+	  This is a bitmask with information of what bits in PC
 	  that a user can change the value on using ioctl's.
 	  Bit set = changeable.
 
@@ -264,7 +264,7 @@
 	depends on ETRAX_GPIO && ETRAXFS
 	default "0x00000"
 	help
-	  This is a bitmask (18 bits) with information of what bits in PD
+	  This is a bitmask with information of what bits in PD
 	  that a user can change direction on using ioctl's.
 	  Bit set = changeable.
 	  You probably want 0x00000 here, but it depends on your hardware.
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index 28dd771..5387424 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -313,6 +313,7 @@
 	size_t len;
 	int ram_rootfs_partition = -1; /* -1 => no RAM rootfs partition */
 	int part;
+	struct mtd_partition *partition;
 
 	/* We need a root fs. If it resides in RAM, we need to use an
 	 * MTDRAM device, so it must be enabled in the kernel config,
@@ -329,7 +330,7 @@
 
 	main_mtd = flash_probe();
 	if (main_mtd)
-		printk(KERN_INFO "%s: 0x%08x bytes of NOR flash memory.\n",
+		printk(KERN_INFO "%s: 0x%08llx bytes of NOR flash memory.\n",
 		       main_mtd->name, main_mtd->size);
 
 #ifdef CONFIG_ETRAX_NANDFLASH
@@ -388,10 +389,10 @@
 #endif
 
 	if (main_mtd) {
+		loff_t ptable_sector = CONFIG_ETRAX_PTABLE_SECTOR;
 		main_mtd->owner = THIS_MODULE;
 		axisflash_mtd = main_mtd;
 
-		loff_t ptable_sector = CONFIG_ETRAX_PTABLE_SECTOR;
 
 		/* First partition (rescue) is always set to the default. */
 		pidx++;
@@ -517,7 +518,7 @@
 	/* Decide whether to use default partition table. */
 	/* Only use default table if we actually have a device (main_mtd) */
 
-	struct mtd_partition *partition = &axis_partitions[0];
+	partition = &axis_partitions[0];
 	if (main_mtd && !ptable_ok) {
 		memcpy(axis_partitions, axis_default_partitions,
 		       sizeof(axis_default_partitions));
@@ -580,7 +581,7 @@
 			printk(KERN_INFO "axisflashmap: Adding RAM partition "
 			       "for rootfs image.\n");
 			err = mtdram_init_device(mtd_ram,
-						 (void *)partition[part].offset,
+						 (void *)(u_int32_t)partition[part].offset,
 						 partition[part].size,
 						 partition[part].name);
 			if (err)
diff --git a/arch/cris/arch-v32/drivers/mach-a3/gpio.c b/arch/cris/arch-v32/drivers/mach-a3/gpio.c
index 74f9fe8..c92e1da 100644
--- a/arch/cris/arch-v32/drivers/mach-a3/gpio.c
+++ b/arch/cris/arch-v32/drivers/mach-a3/gpio.c
@@ -957,7 +957,7 @@
 
 static int __init gpio_init(void)
 {
-	int res;
+	int res, res2;
 
 	printk(KERN_INFO "ETRAX FS GPIO driver v2.7, (c) 2003-2008 "
 		"Axis Communications AB\n");
@@ -977,7 +977,7 @@
 	CRIS_LED_DISK_READ(0);
 	CRIS_LED_DISK_WRITE(0);
 
-	int res2 = request_irq(GIO_INTR_VECT, gpio_interrupt,
+	res2 = request_irq(GIO_INTR_VECT, gpio_interrupt,
 		IRQF_SHARED, "gpio", &alarmlist);
 	if (res2) {
 		printk(KERN_ERR "err: irq for gpio\n");
diff --git a/arch/cris/arch-v32/drivers/mach-fs/gpio.c b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
index 009f4ee..72968fb 100644
--- a/arch/cris/arch-v32/drivers/mach-fs/gpio.c
+++ b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
@@ -425,12 +425,11 @@
 	if (p > GPIO_MINOR_LAST)
 		return -EINVAL;
 
-	priv = kmalloc(sizeof(struct gpio_private), GFP_KERNEL);
+	priv = kzalloc(sizeof(struct gpio_private), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
 	mutex_lock(&gpio_mutex);
-	memset(priv, 0, sizeof(*priv));
 
 	priv->minor = p;
 
diff --git a/arch/cris/arch-v32/kernel/entry.S b/arch/cris/arch-v32/kernel/entry.S
index 026a0b2..b17a209 100644
--- a/arch/cris/arch-v32/kernel/entry.S
+++ b/arch/cris/arch-v32/kernel/entry.S
@@ -240,6 +240,17 @@
 
 	.type	_Rexit,@function
 _Rexit:
+#if defined(CONFIG_TRACE_IRQFLAGS)
+	addoq	+PT_ccs, $sp, $acr
+	move.d	[$acr], $r0
+	btstq	15, $r0		; I1
+	bpl	1f
+	nop
+	jsr	trace_hardirqs_on
+	nop
+1:
+#endif
+
 	;; This epilogue MUST match the prologues in multiple_interrupt, irq.h
 	;; and ptregs.h.
 	addq	4, $sp		; Skip orig_r10.
@@ -875,6 +886,14 @@
 	.long sys_process_vm_writev
 	.long sys_kcmp			/* 350 */
 	.long sys_finit_module
+	.long sys_sched_setattr
+	.long sys_sched_getattr
+	.long sys_renameat2
+	.long sys_seccomp		/* 355 */
+	.long sys_getrandom
+	.long sys_memfd_create
+	.long sys_bpf
+	.long sys_execveat
 
 	/*
 	 * NOTE!! This doesn't have to be exact - we just have
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index cebd32e..c7ce784 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -23,9 +23,9 @@
 /* We use this if we don't have any better idle routine. */
 void default_idle(void)
 {
+	local_irq_enable();
 	/* Halt until exception. */
-	__asm__ volatile("ei    \n\t"
-			 "halt      ");
+	__asm__ volatile("halt");
 }
 
 /*
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
index 3a36ae6..150d1d7 100644
--- a/arch/cris/arch-v32/kernel/signal.c
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -19,7 +19,6 @@
 #include <asm/processor.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
-#include <arch/ptrace.h>
 #include <arch/hwregs/cpu_vect.h>
 
 extern unsigned long cris_signal_return_page;
diff --git a/arch/cris/arch-v32/mach-fs/pinmux.c b/arch/cris/arch-v32/mach-fs/pinmux.c
index 05a0470..d8a3a3c 100644
--- a/arch/cris/arch-v32/mach-fs/pinmux.c
+++ b/arch/cris/arch-v32/mach-fs/pinmux.c
@@ -46,6 +46,8 @@
 		pins[port][i] = mode;
 
 	crisv32_pinmux_set(port);
+
+	return 0;
 }
 
 static int crisv32_pinmux_init(void)
@@ -93,6 +95,7 @@
 	int ret = -EINVAL;
 	char saved[sizeof pins];
 	unsigned long flags;
+	reg_pinmux_rw_hwprot hwprot;
 
 	spin_lock_irqsave(&pinmux_lock, flags);
 
@@ -101,7 +104,7 @@
 
 	crisv32_pinmux_init();	/* Must be done before we read rw_hwprot */
 
-	reg_pinmux_rw_hwprot hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot);
+	hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot);
 
 	switch (function) {
 	case pinmux_ser1:
@@ -227,6 +230,7 @@
 	int ret = -EINVAL;
 	char saved[sizeof pins];
 	unsigned long flags;
+	reg_pinmux_rw_hwprot hwprot;
 
 	spin_lock_irqsave(&pinmux_lock, flags);
 
@@ -235,7 +239,7 @@
 
 	crisv32_pinmux_init();	/* Must be done before we read rw_hwprot */
 
-	reg_pinmux_rw_hwprot hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot);
+	hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot);
 
 	switch (function) {
 	case pinmux_ser1:
diff --git a/arch/cris/configs/artpec_3_defconfig b/arch/cris/configs/artpec_3_defconfig
index 71854d4..70e497e 100644
--- a/arch/cris/configs/artpec_3_defconfig
+++ b/arch/cris/configs/artpec_3_defconfig
@@ -12,10 +12,6 @@
 CONFIG_CRIS_MACH_ARTPEC3=y
 CONFIG_ETRAX_DRAM_SIZE=32
 CONFIG_ETRAX_FLASH1_SIZE=4
-CONFIG_ETRAX_DEF_GIO_PA_OE=1c
-CONFIG_ETRAX_DEF_GIO_PA_OUT=00
-CONFIG_ETRAX_DEF_GIO_PB_OE=00000
-CONFIG_ETRAX_DEF_GIO_PB_OUT=00000
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -42,3 +38,4 @@
 CONFIG_CRAMFS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
+CONFIG_ETRAX_GPIO=y
diff --git a/arch/cris/configs/etraxfs_defconfig b/arch/cris/configs/etraxfs_defconfig
index 87c7227..9123268 100644
--- a/arch/cris/configs/etraxfs_defconfig
+++ b/arch/cris/configs/etraxfs_defconfig
@@ -38,3 +38,4 @@
 CONFIG_CRAMFS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
+CONFIG_ETRAX_GPIO=y
diff --git a/arch/cris/include/arch-v32/arch/bug.h b/arch/cris/include/arch-v32/arch/bug.h
index 0f211e1..fb59faa 100644
--- a/arch/cris/include/arch-v32/arch/bug.h
+++ b/arch/cris/include/arch-v32/arch/bug.h
@@ -10,6 +10,7 @@
  * All other stuff is done out-of-band with exception handlers.
  */
 #define BUG()								\
+do {									\
 	__asm__ __volatile__ ("0: break 14\n\t"				\
 			      ".section .fixup,\"ax\"\n"		\
 			      "1:\n\t"					\
@@ -21,9 +22,15 @@
 			      ".section __ex_table,\"a\"\n\t"		\
 			      ".dword 0b, 1b\n\t"			\
 			      ".previous\n\t"				\
-			      : : "ri" (__FILE__), "i" (__LINE__))
+			      : : "ri" (__FILE__), "i" (__LINE__));	\
+	unreachable();				\
+} while (0)
 #else
-#define BUG() __asm__ __volatile__ ("break 14\n\t")
+#define BUG() 					\
+do {						\
+	__asm__ __volatile__ ("break 14\n\t");	\
+	unreachable();				\
+} while (0)
 #endif
 
 #define HAVE_ARCH_BUG
diff --git a/arch/cris/include/arch-v32/arch/irqflags.h b/arch/cris/include/arch-v32/arch/irqflags.h
index 041851f..5f6fddf 100644
--- a/arch/cris/include/arch-v32/arch/irqflags.h
+++ b/arch/cris/include/arch-v32/arch/irqflags.h
@@ -2,7 +2,7 @@
 #define __ASM_CRIS_ARCH_IRQFLAGS_H
 
 #include <linux/types.h>
-#include <arch/ptrace.h>
+#include <asm/ptrace.h>
 
 static inline unsigned long arch_local_save_flags(void)
 {
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index ad2244f..b7f6819 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -1,14 +1,20 @@
 generic-y += atomic.h
+generic-y += auxvec.h
 generic-y += barrier.h
+generic-y += bitsperlong.h
 generic-y += clkdev.h
 generic-y += cmpxchg.h
 generic-y += cputime.h
 generic-y += device.h
 generic-y += div64.h
+generic-y += errno.h
 generic-y += exec.h
 generic-y += emergency-restart.h
+generic-y += fcntl.h
 generic-y += futex.h
 generic-y += hardirq.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
 generic-y += kdebug.h
@@ -19,11 +25,22 @@
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mman.h
 generic-y += module.h
+generic-y += msgbuf.h
 generic-y += percpu.h
+generic-y += poll.h
 generic-y += preempt.h
+generic-y += resource.h
 generic-y += sections.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += statfs.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += types.h
 generic-y += vga.h
 generic-y += xor.h
diff --git a/arch/cris/include/asm/mmu_context.h b/arch/cris/include/asm/mmu_context.h
index 1d45fd6..349acfd 100644
--- a/arch/cris/include/asm/mmu_context.h
+++ b/arch/cris/include/asm/mmu_context.h
@@ -11,7 +11,14 @@
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
-#define activate_mm(prev,next) switch_mm((prev),(next),NULL)
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm(prev, next, NULL);
+	local_irq_restore(flags);
+}
 
 /* current active pgd - this is similar to other processors pgd 
  * registers like cr3 on the i386
diff --git a/arch/cris/include/asm/stacktrace.h b/arch/cris/include/asm/stacktrace.h
new file mode 100644
index 0000000..2d90856
--- /dev/null
+++ b/arch/cris/include/asm/stacktrace.h
@@ -0,0 +1,8 @@
+#ifndef __CRIS_STACKTRACE_H
+#define __CRIS_STACKTRACE_H
+
+void walk_stackframe(unsigned long sp,
+		     int (*fn)(unsigned long addr, void *data),
+		     void *data);
+
+#endif
diff --git a/arch/cris/include/asm/types.h b/arch/cris/include/asm/types.h
deleted file mode 100644
index a3cac77..0000000
--- a/arch/cris/include/asm/types.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ETRAX_TYPES_H
-#define _ETRAX_TYPES_H
-
-#include <uapi/asm/types.h>
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-
-#define BITS_PER_LONG 32
-
-#endif
diff --git a/arch/cris/include/asm/unistd.h b/arch/cris/include/asm/unistd.h
index 0f40fed..9c23535 100644
--- a/arch/cris/include/asm/unistd.h
+++ b/arch/cris/include/asm/unistd.h
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls 360
+#define NR_syscalls 365
 
 #include <arch/unistd.h>
 
diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild
index 01f66b8..d5564a0 100644
--- a/arch/cris/include/uapi/asm/Kbuild
+++ b/arch/cris/include/uapi/asm/Kbuild
@@ -6,6 +6,9 @@
 header-y += auxvec.h
 header-y += bitsperlong.h
 header-y += byteorder.h
+header-y += elf.h
+header-y += elf_v10.h
+header-y += elf_v32.h
 header-y += errno.h
 header-y += ethernet.h
 header-y += etraxgpio.h
@@ -19,6 +22,8 @@
 header-y += poll.h
 header-y += posix_types.h
 header-y += ptrace.h
+header-y += ptrace_v10.h
+header-y += ptrace_v32.h
 header-y += resource.h
 header-y += rs485.h
 header-y += sembuf.h
diff --git a/arch/cris/include/uapi/asm/auxvec.h b/arch/cris/include/uapi/asm/auxvec.h
deleted file mode 100644
index cb30b01..0000000
--- a/arch/cris/include/uapi/asm/auxvec.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASMCRIS_AUXVEC_H
-#define __ASMCRIS_AUXVEC_H
-
-#endif
diff --git a/arch/cris/include/uapi/asm/bitsperlong.h b/arch/cris/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0..0000000
--- a/arch/cris/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/cris/include/asm/elf.h b/arch/cris/include/uapi/asm/elf.h
similarity index 95%
rename from arch/cris/include/asm/elf.h
rename to arch/cris/include/uapi/asm/elf.h
index c2a394f..a5df05b 100644
--- a/arch/cris/include/asm/elf.h
+++ b/arch/cris/include/uapi/asm/elf.h
@@ -5,7 +5,11 @@
  * ELF register definitions..
  */
 
-#include <asm/user.h>
+#ifdef __arch_v32
+#include <asm/elf_v32.h>
+#else
+#include <asm/elf_v10.h>
+#endif
 
 #define R_CRIS_NONE             0
 #define R_CRIS_8                1
@@ -32,7 +36,6 @@
 
 /* Note that NGREG is defined to ELF_NGREG in include/linux/elfcore.h, and is
    thus exposed to user-space. */
-#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
 /* A placeholder; CRIS does not have any fp regs.  */
@@ -45,8 +48,6 @@
 #define ELF_DATA	ELFDATA2LSB
 #define ELF_ARCH	EM_CRIS
 
-#include <arch/elf.h>
-
 /* The master for these definitions is {binutils}/include/elf/cris.h:  */
 /* User symbols in this file have a leading underscore.  */
 #define EF_CRIS_UNDERSCORE		0x00000001
diff --git a/arch/cris/include/arch-v10/arch/elf.h b/arch/cris/include/uapi/asm/elf_v10.h
similarity index 97%
rename from arch/cris/include/arch-v10/arch/elf.h
rename to arch/cris/include/uapi/asm/elf_v10.h
index 1eb638a..3ea65ce 100644
--- a/arch/cris/include/arch-v10/arch/elf.h
+++ b/arch/cris/include/uapi/asm/elf_v10.h
@@ -1,10 +1,11 @@
 #ifndef __ASMCRIS_ARCH_ELF_H
 #define __ASMCRIS_ARCH_ELF_H
 
-#include <arch/system.h>
-
 #define ELF_MACH EF_CRIS_VARIANT_ANY_V0_V10
 
+/* Matches struct user_regs_struct */
+#define ELF_NGREG 35
+
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
diff --git a/arch/cris/include/arch-v32/arch/elf.h b/arch/cris/include/uapi/asm/elf_v32.h
similarity index 97%
rename from arch/cris/include/arch-v32/arch/elf.h
rename to arch/cris/include/uapi/asm/elf_v32.h
index c46d582..f09fe49 100644
--- a/arch/cris/include/arch-v32/arch/elf.h
+++ b/arch/cris/include/uapi/asm/elf_v32.h
@@ -1,10 +1,11 @@
 #ifndef _ASM_CRIS_ELF_H
 #define _ASM_CRIS_ELF_H
 
-#include <arch/system.h>
-
 #define ELF_CORE_EFLAGS EF_CRIS_VARIANT_V32
 
+/* Matches struct user_regs_struct */
+#define ELF_NGREG 32
+
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
diff --git a/arch/cris/include/uapi/asm/errno.h b/arch/cris/include/uapi/asm/errno.h
deleted file mode 100644
index 2bf5eb5..0000000
--- a/arch/cris/include/uapi/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _CRIS_ERRNO_H
-#define _CRIS_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif
diff --git a/arch/cris/include/uapi/asm/fcntl.h b/arch/cris/include/uapi/asm/fcntl.h
deleted file mode 100644
index 46ab12d..0000000
--- a/arch/cris/include/uapi/asm/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fcntl.h>
diff --git a/arch/cris/include/uapi/asm/ioctl.h b/arch/cris/include/uapi/asm/ioctl.h
deleted file mode 100644
index b279fe0..0000000
--- a/arch/cris/include/uapi/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/cris/include/uapi/asm/ipcbuf.h b/arch/cris/include/uapi/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51..0000000
--- a/arch/cris/include/uapi/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/cris/include/uapi/asm/kvm_para.h b/arch/cris/include/uapi/asm/kvm_para.h
deleted file mode 100644
index 14fab8f..0000000
--- a/arch/cris/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/cris/include/uapi/asm/mman.h b/arch/cris/include/uapi/asm/mman.h
deleted file mode 100644
index 8eebf89f5..0000000
--- a/arch/cris/include/uapi/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/cris/include/uapi/asm/msgbuf.h b/arch/cris/include/uapi/asm/msgbuf.h
deleted file mode 100644
index ada63df..0000000
--- a/arch/cris/include/uapi/asm/msgbuf.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef _CRIS_MSGBUF_H
-#define _CRIS_MSGBUF_H
-
-/* verbatim copy of asm-i386 version */
-
-/* 
- * The msqid64_ds structure for CRIS architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct msqid64_ds {
-	struct ipc64_perm msg_perm;
-	__kernel_time_t msg_stime;	/* last msgsnd time */
-	unsigned long	__unused1;
-	__kernel_time_t msg_rtime;	/* last msgrcv time */
-	unsigned long	__unused2;
-	__kernel_time_t msg_ctime;	/* last change time */
-	unsigned long	__unused3;
-	unsigned long  msg_cbytes;	/* current number of bytes on queue */
-	unsigned long  msg_qnum;	/* number of messages in queue */
-	unsigned long  msg_qbytes;	/* max number of bytes on queue */
-	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
-	__kernel_pid_t msg_lrpid;	/* last receive pid */
-	unsigned long  __unused4;
-	unsigned long  __unused5;
-};
-
-#endif /* _CRIS_MSGBUF_H */
diff --git a/arch/cris/include/uapi/asm/poll.h b/arch/cris/include/uapi/asm/poll.h
deleted file mode 100644
index c98509d..0000000
--- a/arch/cris/include/uapi/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/cris/include/uapi/asm/ptrace.h b/arch/cris/include/uapi/asm/ptrace.h
index c689c9b..bd8946f 100644
--- a/arch/cris/include/uapi/asm/ptrace.h
+++ b/arch/cris/include/uapi/asm/ptrace.h
@@ -1 +1,5 @@
-#include <arch/ptrace.h>
+#ifdef __arch_v32
+#include <asm/ptrace_v32.h>
+#else
+#include <asm/ptrace_v10.h>
+#endif
diff --git a/arch/cris/include/arch-v10/arch/ptrace.h b/arch/cris/include/uapi/asm/ptrace_v10.h
similarity index 100%
rename from arch/cris/include/arch-v10/arch/ptrace.h
rename to arch/cris/include/uapi/asm/ptrace_v10.h
diff --git a/arch/cris/include/arch-v32/arch/ptrace.h b/arch/cris/include/uapi/asm/ptrace_v32.h
similarity index 100%
rename from arch/cris/include/arch-v32/arch/ptrace.h
rename to arch/cris/include/uapi/asm/ptrace_v32.h
diff --git a/arch/cris/include/uapi/asm/resource.h b/arch/cris/include/uapi/asm/resource.h
deleted file mode 100644
index b5d2944..0000000
--- a/arch/cris/include/uapi/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _CRIS_RESOURCE_H
-#define _CRIS_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif
diff --git a/arch/cris/include/uapi/asm/sembuf.h b/arch/cris/include/uapi/asm/sembuf.h
deleted file mode 100644
index 7fed984..0000000
--- a/arch/cris/include/uapi/asm/sembuf.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _CRIS_SEMBUF_H
-#define _CRIS_SEMBUF_H
-
-/* 
- * The semid64_ds structure for CRIS architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct semid64_ds {
-	struct ipc64_perm sem_perm;		/* permissions .. see ipc.h */
-	__kernel_time_t	sem_otime;		/* last semop time */
-	unsigned long	__unused1;
-	__kernel_time_t	sem_ctime;		/* last change time */
-	unsigned long	__unused2;
-	unsigned long	sem_nsems;		/* no. of semaphores in array */
-	unsigned long	__unused3;
-	unsigned long	__unused4;
-};
-
-#endif /* _CRIS_SEMBUF_H */
diff --git a/arch/cris/include/uapi/asm/shmbuf.h b/arch/cris/include/uapi/asm/shmbuf.h
deleted file mode 100644
index 3239e3f..0000000
--- a/arch/cris/include/uapi/asm/shmbuf.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef _CRIS_SHMBUF_H
-#define _CRIS_SHMBUF_H
-
-/* 
- * The shmid64_ds structure for CRIS architecture (same as for i386)
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct shmid64_ds {
-	struct ipc64_perm	shm_perm;	/* operation perms */
-	size_t			shm_segsz;	/* size of segment (bytes) */
-	__kernel_time_t		shm_atime;	/* last attach time */
-	unsigned long		__unused1;
-	__kernel_time_t		shm_dtime;	/* last detach time */
-	unsigned long		__unused2;
-	__kernel_time_t		shm_ctime;	/* last change time */
-	unsigned long		__unused3;
-	__kernel_pid_t		shm_cpid;	/* pid of creator */
-	__kernel_pid_t		shm_lpid;	/* pid of last operator */
-	unsigned long		shm_nattch;	/* no. of current attaches */
-	unsigned long		__unused4;
-	unsigned long		__unused5;
-};
-
-struct shminfo64 {
-	unsigned long	shmmax;
-	unsigned long	shmmin;
-	unsigned long	shmmni;
-	unsigned long	shmseg;
-	unsigned long	shmall;
-	unsigned long	__unused1;
-	unsigned long	__unused2;
-	unsigned long	__unused3;
-	unsigned long	__unused4;
-};
-
-#endif /* _CRIS_SHMBUF_H */
diff --git a/arch/cris/include/uapi/asm/siginfo.h b/arch/cris/include/uapi/asm/siginfo.h
deleted file mode 100644
index c1cd6d1..0000000
--- a/arch/cris/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _CRIS_SIGINFO_H
-#define _CRIS_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h
deleted file mode 100644
index e2503d9f..0000000
--- a/arch/cris/include/uapi/asm/socket.h
+++ /dev/null
@@ -1,92 +0,0 @@
-#ifndef _ASM_SOCKET_H
-#define _ASM_SOCKET_H
-
-/* almost the same as asm-i386/socket.h */
-
-#include <asm/sockios.h>
-
-/* For setsockoptions(2) */
-#define SOL_SOCKET	1
-
-#define SO_DEBUG	1
-#define SO_REUSEADDR	2
-#define SO_TYPE		3
-#define SO_ERROR	4
-#define SO_DONTROUTE	5
-#define SO_BROADCAST	6
-#define SO_SNDBUF	7
-#define SO_RCVBUF	8
-#define SO_SNDBUFFORCE	32
-#define SO_RCVBUFFORCE	33
-#define SO_KEEPALIVE	9
-#define SO_OOBINLINE	10
-#define SO_NO_CHECK	11
-#define SO_PRIORITY	12
-#define SO_LINGER	13
-#define SO_BSDCOMPAT	14
-#define SO_REUSEPORT	15
-#define SO_PASSCRED	16
-#define SO_PEERCRED	17
-#define SO_RCVLOWAT	18
-#define SO_SNDLOWAT	19
-#define SO_RCVTIMEO	20
-#define SO_SNDTIMEO	21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION		22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT	23
-#define SO_SECURITY_ENCRYPTION_NETWORK		24
-
-#define SO_BINDTODEVICE	25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER        26
-#define SO_DETACH_FILTER        27
-#define SO_GET_FILTER		SO_ATTACH_FILTER
-
-#define SO_PEERNAME		28
-#define SO_TIMESTAMP           29
-#define SCM_TIMESTAMP          SO_TIMESTAMP
-
-#define SO_ACCEPTCONN          30
-
-#define SO_PEERSEC             31
-#define SO_PASSSEC		34
-#define SO_TIMESTAMPNS		35
-#define SCM_TIMESTAMPNS		SO_TIMESTAMPNS
-
-#define SO_MARK			36
-
-#define SO_TIMESTAMPING		37
-#define SCM_TIMESTAMPING	SO_TIMESTAMPING
-
-#define SO_PROTOCOL		38
-#define SO_DOMAIN		39
-
-#define SO_RXQ_OVFL             40
-
-#define SO_WIFI_STATUS		41
-#define SCM_WIFI_STATUS		SO_WIFI_STATUS
-#define SO_PEEK_OFF		42
-
-/* Instruct lower device to use last 4-bytes of skb data as FCS */
-#define SO_NOFCS		43
-
-#define SO_LOCK_FILTER		44
-
-#define SO_SELECT_ERR_QUEUE	45
-
-#define SO_BUSY_POLL		46
-
-#define SO_MAX_PACING_RATE	47
-
-#define SO_BPF_EXTENSIONS	48
-
-#define SO_INCOMING_CPU		49
-
-#define SO_ATTACH_BPF		50
-#define SO_DETACH_BPF		SO_DETACH_FILTER
-
-#endif /* _ASM_SOCKET_H */
-
-
diff --git a/arch/cris/include/uapi/asm/sockios.h b/arch/cris/include/uapi/asm/sockios.h
deleted file mode 100644
index cfe7bfe..0000000
--- a/arch/cris/include/uapi/asm/sockios.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ARCH_CRIS_SOCKIOS__
-#define __ARCH_CRIS_SOCKIOS__
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 	0x8901
-#define SIOCSPGRP	0x8902
-#define FIOGETOWN	0x8903
-#define SIOCGPGRP	0x8904
-#define SIOCATMARK	0x8905
-#define SIOCGSTAMP	0x8906		/* Get stamp (timeval) */
-#define SIOCGSTAMPNS	0x8907		/* Get stamp (timespec) */
-
-#endif
diff --git a/arch/cris/include/uapi/asm/statfs.h b/arch/cris/include/uapi/asm/statfs.h
deleted file mode 100644
index fdaf921..0000000
--- a/arch/cris/include/uapi/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _CRIS_STATFS_H
-#define _CRIS_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif
diff --git a/arch/cris/include/uapi/asm/types.h b/arch/cris/include/uapi/asm/types.h
deleted file mode 100644
index 9ec9d4c..0000000
--- a/arch/cris/include/uapi/asm/types.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/int-ll64.h>
diff --git a/arch/cris/include/uapi/asm/unistd.h b/arch/cris/include/uapi/asm/unistd.h
index f3287fa..062b648 100644
--- a/arch/cris/include/uapi/asm/unistd.h
+++ b/arch/cris/include/uapi/asm/unistd.h
@@ -356,5 +356,13 @@
 #define __NR_process_vm_writev	349
 #define __NR_kcmp		350
 #define __NR_finit_module	351
+#define __NR_sched_setattr	352
+#define __NR_sched_getattr	353
+#define __NR_renameat2		354
+#define __NR_seccomp		355
+#define __NR_getrandom		356
+#define __NR_memfd_create	357
+#define __NR_bpf		358
+#define __NR_execveat		359
 
 #endif /* _UAPI_ASM_CRIS_UNISTD_H_ */
diff --git a/arch/cris/kernel/Makefile b/arch/cris/kernel/Makefile
index edef71f..5fae398 100644
--- a/arch/cris/kernel/Makefile
+++ b/arch/cris/kernel/Makefile
@@ -8,6 +8,7 @@
 
 obj-y   := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
 obj-y += devicetree.o
+obj-y += stacktrace.o
 
 obj-$(CONFIG_MODULES)    += crisksyms.o
 obj-$(CONFIG_MODULES)	 += module.o
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index dd0be5d..694850e 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -45,7 +45,11 @@
 asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
 {
 	unsigned long sp;
-	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct pt_regs *old_regs;
+
+	trace_hardirqs_off();
+
+	old_regs = set_irq_regs(regs);
 	irq_enter();
 	sp = rdsp();
 	if (unlikely((sp & (PAGE_SIZE - 1)) < (PAGE_SIZE/8))) {
diff --git a/arch/cris/kernel/stacktrace.c b/arch/cris/kernel/stacktrace.c
new file mode 100644
index 0000000..99838c7
--- /dev/null
+++ b/arch/cris/kernel/stacktrace.c
@@ -0,0 +1,75 @@
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <asm/stacktrace.h>
+
+void walk_stackframe(unsigned long sp,
+		     int (*fn)(unsigned long addr, void *data),
+		     void *data)
+{
+	unsigned long high = ALIGN(sp, THREAD_SIZE);
+
+	for (; sp <= high - 4; sp += 4) {
+		unsigned long addr = *(unsigned long *) sp;
+
+		if (!kernel_text_address(addr))
+			continue;
+
+		if (fn(addr, data))
+			break;
+	}
+}
+
+struct stack_trace_data {
+	struct stack_trace *trace;
+	unsigned int no_sched_functions;
+	unsigned int skip;
+};
+
+#ifdef CONFIG_STACKTRACE
+
+static int save_trace(unsigned long addr, void *d)
+{
+	struct stack_trace_data *data = d;
+	struct stack_trace *trace = data->trace;
+
+	if (data->no_sched_functions && in_sched_functions(addr))
+		return 0;
+
+	if (data->skip) {
+		data->skip--;
+		return 0;
+	}
+
+	trace->entries[trace->nr_entries++] = addr;
+
+	return trace->nr_entries >= trace->max_entries;
+}
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+	struct stack_trace_data data;
+	unsigned long sp;
+
+	data.trace = trace;
+	data.skip = trace->skip;
+
+	if (tsk != current) {
+		data.no_sched_functions = 1;
+		sp = tsk->thread.ksp;
+	} else {
+		data.no_sched_functions = 0;
+		sp = rdsp();
+	}
+
+	walk_stackframe(sp, save_trace, &data);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif /* CONFIG_STACKTRACE */
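
With walk_stackframe() scanning the stack for kernel text addresses, the generic stacktrace API becomes usable on CRIS. A minimal consumer sketch (hypothetical function name, not part of the patch):

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void dump_current_stack_sketch(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 0,
	};

	save_stack_trace(&trace);	/* fills entries[] via walk_stackframe() */
	print_stack_trace(&trace, 0);	/* one symbolised line per entry */
}
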
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index f9c86c4..f211839 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -294,6 +294,8 @@
 	printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number);
 #endif
 
+	pci_read_bridge_bases(bus);
+
 	if (bus->number == 0) {
 		struct pci_dev *dev;
 		list_for_each_entry(dev, &bus->devices, bus_list) {
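
The same pattern recurs in the ia64, microblaze, mips, mn10300 and powerpc hunks below: pci_read_bridge_bases() is called from each architecture's pcibios_fixup_bus() so that PCI-to-PCI bridge apertures are read during the per-bus fixup (some callers additionally check PCI_PROBE_ONLY or that bus->self is really a bridge). A minimal sketch of the shared convention, with nothing arch-specific beyond the call itself:

#include <linux/pci.h>

void pcibios_fixup_bus(struct pci_bus *bus)
{
	/* Root buses have no upstream bridge (bus->self == NULL) and are skipped;
	 * for everything else read the I/O, memory and prefetch windows. */
	if (bus->self)
		pci_read_bridge_bases(bus);
}
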
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 95c39b9..99c96a5 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls			319 /* length of syscall table */
+#define NR_syscalls			321 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 4610795..98e94e1 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -332,5 +332,7 @@
 #define __NR_memfd_create		1340
 #define __NR_bpf			1341
 #define __NR_execveat			1342
+#define __NR_userfaultfd		1343
+#define __NR_membarrier			1344
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ae0de7b..37cc7a6 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1768,5 +1768,7 @@
 	data8 sys_memfd_create			// 1340
 	data8 sys_bpf
 	data8 sys_execveat
+	data8 sys_userfaultfd
+	data8 sys_membarrier
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index d89b601..7cc3be9 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -533,9 +533,10 @@
 {
 	struct pci_dev *dev;
 
-	if (b->self)
+	if (b->self) {
+		pci_read_bridge_bases(b);
 		pcibios_fixup_bridge_resources(b->self);
-
+	}
 	list_for_each_entry(dev, &b->devices, bus_list)
 		pcibios_fixup_device_resources(dev);
 	platform_pci_fixup_bus(b);
diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c
index 47b5f90..7ff739e 100644
--- a/arch/m68k/amiga/amiints.c
+++ b/arch/m68k/amiga/amiints.c
@@ -46,7 +46,7 @@
  * The builtin Amiga hardware interrupt handlers.
  */
 
-static void ami_int1(unsigned int irq, struct irq_desc *desc)
+static void ami_int1(struct irq_desc *desc)
 {
 	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
@@ -69,7 +69,7 @@
 	}
 }
 
-static void ami_int3(unsigned int irq, struct irq_desc *desc)
+static void ami_int3(struct irq_desc *desc)
 {
 	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
@@ -92,7 +92,7 @@
 	}
 }
 
-static void ami_int4(unsigned int irq, struct irq_desc *desc)
+static void ami_int4(struct irq_desc *desc)
 {
 	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
@@ -121,7 +121,7 @@
 	}
 }
 
-static void ami_int5(unsigned int irq, struct irq_desc *desc)
+static void ami_int5(struct irq_desc *desc)
 {
 	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
diff --git a/arch/m68k/coldfire/intc-5272.c b/arch/m68k/coldfire/intc-5272.c
index 47371de..b0a19e2 100644
--- a/arch/m68k/coldfire/intc-5272.c
+++ b/arch/m68k/coldfire/intc-5272.c
@@ -143,12 +143,10 @@
  * We need to be careful with the masking/acking due to the side effects
  * of masking an interrupt.
  */
-static void intc_external_irq(unsigned int __irq, struct irq_desc *desc)
+static void intc_external_irq(struct irq_desc *desc)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
-
 	irq_desc_get_chip(desc)->irq_ack(&desc->irq_data);
-	handle_simple_irq(irq, desc);
+	handle_simple_irq(desc);
 }
 
 static struct irq_chip intc_irq_chip = {
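
The irq flow-handler conversions in this merge (the Amiga handlers above, and the m68k, mips and powerpc ones below) all drop the irq number argument; handlers that still need it, or the driver context, now pull them out of the descriptor. A sketch of the new shape (hypothetical handler name, mirroring intc_external_irq above):

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/printk.h>

static void external_irq_sketch(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);	/* replaces the old first argument */
	void *ctx = irq_desc_get_handler_data(desc);	/* driver context, if registered */

	pr_debug("demux for irq %u (ctx %p)\n", irq, ctx);
	irq_desc_get_chip(desc)->irq_ack(&desc->irq_data);
	handle_simple_irq(desc);			/* flow helpers also take only the descriptor now */
}
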
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index 81ca118..a644f4a 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -64,8 +64,7 @@
 						      struct pt_regs *));
 extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt);
 extern void m68k_setup_irq_controller(struct irq_chip *,
-				      void (*handle)(unsigned int irq,
-						     struct irq_desc *desc),
+				      void (*handle)(struct irq_desc *desc),
 				      unsigned int irq, unsigned int cnt);
 
 extern unsigned int irq_canonicalize(unsigned int irq);
diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h
index fe3fc9a..53c632c 100644
--- a/arch/m68k/include/asm/mac_via.h
+++ b/arch/m68k/include/asm/mac_via.h
@@ -261,7 +261,7 @@
 extern void via_irq_disable(int);
 extern void via_nubus_irq_startup(int irq);
 extern void via_nubus_irq_shutdown(int irq);
-extern void via1_irq(unsigned int irq, struct irq_desc *desc);
+extern void via1_irq(struct irq_desc *desc);
 extern void via1_set_head(int);
 extern int via2_scsi_drq_pending(void);
 
diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c
index 3fe0e43..f6f7d42 100644
--- a/arch/m68k/mac/baboon.c
+++ b/arch/m68k/mac/baboon.c
@@ -45,7 +45,7 @@
  * Baboon interrupt handler. This works a lot like a VIA.
  */
 
-static void baboon_irq(unsigned int irq, struct irq_desc *desc)
+static void baboon_irq(struct irq_desc *desc)
 {
 	int irq_bit, irq_num;
 	unsigned char events;
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index 191610d..55d65927 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -63,7 +63,7 @@
  * Handle miscellaneous OSS interrupts.
  */
 
-static void oss_irq(unsigned int __irq, struct irq_desc *desc)
+static void oss_irq(struct irq_desc *desc)
 {
 	int events = oss->irq_pending &
 		(OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);
@@ -99,7 +99,7 @@
  * Unlike the VIA/RBV this is on its own autovector interrupt level.
  */
 
-static void oss_nubus_irq(unsigned int irq, struct irq_desc *desc)
+static void oss_nubus_irq(struct irq_desc *desc)
 {
 	int events, irq_bit, i;
 
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index 3b9e302..2290c0c 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -29,6 +29,7 @@
 
 int psc_present;
 volatile __u8 *psc;
+EXPORT_SYMBOL_GPL(psc);
 
 /*
  * Debugging dump, used in various places to see what's going on.
@@ -113,7 +114,7 @@
  * PSC interrupt handler. It's a lot like the VIA interrupt handler.
  */
 
-static void psc_irq(unsigned int __irq, struct irq_desc *desc)
+static void psc_irq(struct irq_desc *desc)
 {
 	unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc);
 	unsigned int irq = irq_desc_get_irq(desc);
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index e198dec..ce56e04 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -446,7 +446,7 @@
  * via6522.c :-), disable/pending masks added.
  */
 
-void via1_irq(unsigned int irq, struct irq_desc *desc)
+void via1_irq(struct irq_desc *desc)
 {
 	int irq_num;
 	unsigned char irq_bit, events;
@@ -467,7 +467,7 @@
 	} while (events >= irq_bit);
 }
 
-static void via2_irq(unsigned int irq, struct irq_desc *desc)
+static void via2_irq(struct irq_desc *desc)
 {
 	int irq_num;
 	unsigned char irq_bit, events;
@@ -493,7 +493,7 @@
  * VIA2 dispatcher as a fast interrupt handler.
  */
 
-void via_nubus_irq(unsigned int irq, struct irq_desc *desc)
+static void via_nubus_irq(struct irq_desc *desc)
 {
 	int slot_irq;
 	unsigned char slot_bit, events;
diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c
index a336094..3074b64 100644
--- a/arch/metag/kernel/irq.c
+++ b/arch/metag/kernel/irq.c
@@ -94,13 +94,11 @@
 			"MOV   D0.5,%0\n"
 			"MOV   D1Ar1,%1\n"
 			"MOV   D1RtP,%2\n"
-			"MOV   D0Ar2,%3\n"
 			"SWAP  A0StP,D0.5\n"
 			"SWAP  PC,D1RtP\n"
 			"MOV   A0StP,D0.5\n"
 			:
-			: "r" (isp), "r" (irq), "r" (desc->handle_irq),
-			  "r" (desc)
+			: "r" (isp), "r" (desc), "r" (desc->handle_irq)
 			: "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
 			  "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
 			  "D0.5"
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 6b8b752..ae838ed 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -863,7 +863,14 @@
 
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
-	/* Fixup the bus */
+	/* When called from the generic PCI probe, read PCI<->PCI bridge
+	 * bases. This is -not- called when generating the PCI tree from
+	 * the OF device-tree.
+	 */
+	if (bus->self != NULL)
+		pci_read_bridge_bases(bus);
+
+	/* Now fixup the bus itself */
 	pcibios_setup_bus_self(bus);
 
 	/* Now fixup devices on that bus */
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index 4c496c5..da9f922 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -851,7 +851,7 @@
 
 /* create chained handlers for the 4 IC requests to the MIPS IRQ ctrl */
 #define DISP(name, base, addr)						      \
-static void au1000_##name##_dispatch(unsigned int irq, struct irq_desc *d)    \
+static void au1000_##name##_dispatch(struct irq_desc *d)		      \
 {									      \
 	unsigned long r = __raw_readl((void __iomem *)KSEG1ADDR(addr));	      \
 	if (likely(r))							      \
@@ -865,7 +865,7 @@
 DISP(ic1r0, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ0INT)
 DISP(ic1r1, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ1INT)
 
-static void alchemy_gpic_dispatch(unsigned int irq, struct irq_desc *d)
+static void alchemy_gpic_dispatch(struct irq_desc *d)
 {
 	int i = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_PRIENC);
 	generic_handle_irq(ALCHEMY_GPIC_INT_BASE + i);
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
index 324ad72..faeddf1 100644
--- a/arch/mips/alchemy/devboards/bcsr.c
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -86,7 +86,7 @@
 /*
  * DB1200/PB1200 CPLD IRQ muxer
  */
-static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
+static void bcsr_csc_handler(struct irq_desc *d)
 {
 	unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
 	struct irq_chip *chip = irq_desc_get_chip(d);
diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c
index ec9a371..8da9961 100644
--- a/arch/mips/ath25/ar2315.c
+++ b/arch/mips/ath25/ar2315.c
@@ -69,7 +69,7 @@
 	.name		= "ar2315-ahb-error",
 };
 
-static void ar2315_misc_irq_handler(unsigned irq, struct irq_desc *desc)
+static void ar2315_misc_irq_handler(struct irq_desc *desc)
 {
 	u32 pending = ar2315_rst_reg_read(AR2315_ISR) &
 		      ar2315_rst_reg_read(AR2315_IMR);
diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c
index e63e38f..acd55a9 100644
--- a/arch/mips/ath25/ar5312.c
+++ b/arch/mips/ath25/ar5312.c
@@ -73,7 +73,7 @@
 	.name    = "ar5312-ahb-error",
 };
 
-static void ar5312_misc_irq_handler(unsigned irq, struct irq_desc *desc)
+static void ar5312_misc_irq_handler(struct irq_desc *desc)
 {
 	u32 pending = ar5312_rst_reg_read(AR5312_ISR) &
 		      ar5312_rst_reg_read(AR5312_IMR);
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 807132b..eeb3953 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -26,7 +26,7 @@
 #include "common.h"
 #include "machtypes.h"
 
-static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ath79_misc_irq_handler(struct irq_desc *desc)
 {
 	void __iomem *base = ath79_reset_base;
 	u32 pending;
@@ -119,7 +119,7 @@
 	irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler);
 }
 
-static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+static void ar934x_ip2_irq_dispatch(struct irq_desc *desc)
 {
 	u32 status;
 
@@ -148,7 +148,7 @@
 	irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch);
 }
 
-static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+static void qca955x_ip2_irq_dispatch(struct irq_desc *desc)
 {
 	u32 status;
 
@@ -171,7 +171,7 @@
 	}
 }
 
-static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+static void qca955x_ip3_irq_dispatch(struct irq_desc *desc)
 {
 	u32 status;
 
@@ -293,8 +293,26 @@
 
 	return 0;
 }
-IRQCHIP_DECLARE(ath79_misc_intc, "qca,ar7100-misc-intc",
-		ath79_misc_intc_of_init);
+
+static int __init ar7100_misc_intc_of_init(
+	struct device_node *node, struct device_node *parent)
+{
+	ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+	return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
+		ar7100_misc_intc_of_init);
+
+static int __init ar7240_misc_intc_of_init(
+	struct device_node *node, struct device_node *parent)
+{
+	ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+	return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
+		ar7240_misc_intc_of_init);
 
 static int __init ar79_cpu_intc_of_init(
 	struct device_node *node, struct device_node *parent)
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index f26c3c6..0352bc8 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -2221,7 +2221,7 @@
 			if (irqd_get_trigger_type(irq_data) &
 				IRQ_TYPE_EDGE_BOTH)
 				cvmx_write_csr(host_data->raw_reg, 1ull << i);
-			generic_handle_irq_desc(irq, desc);
+			generic_handle_irq_desc(desc);
 		}
 	}
 
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 9801ac9..fe67f12 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -20,6 +20,9 @@
 #ifndef cpu_has_tlb
 #define cpu_has_tlb		(cpu_data[0].options & MIPS_CPU_TLB)
 #endif
+#ifndef cpu_has_ftlb
+#define cpu_has_ftlb		(cpu_data[0].options & MIPS_CPU_FTLB)
+#endif
 #ifndef cpu_has_tlbinv
 #define cpu_has_tlbinv		(cpu_data[0].options & MIPS_CPU_TLBINV)
 #endif
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index cd89e98..82ad15f 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -385,6 +385,7 @@
 #define MIPS_CPU_CDMM		0x4000000000ull	/* CPU has Common Device Memory Map */
 #define MIPS_CPU_BP_GHIST	0x8000000000ull /* R12K+ Branch Prediction Global History */
 #define MIPS_CPU_SP		0x10000000000ull /* Small (1KB) page support */
+#define MIPS_CPU_FTLB		0x20000000000ull /* CPU has Fixed-page-size TLB */
 
 /*
  * CPU ASE encodings
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index e8c8d9d..5a1a882 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -61,6 +61,7 @@
 #define KVM_PRIVATE_MEM_SLOTS 	0
 
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_HALT_POLL_NS_DEFAULT 500000
 
 
 
@@ -128,6 +129,7 @@
 	u32 msa_disabled_exits;
 	u32 flush_dcache_exits;
 	u32 halt_successful_poll;
+	u32 halt_attempted_poll;
 	u32 halt_wakeup;
 };
 
diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h
index b02891f..21d9607 100644
--- a/arch/mips/include/asm/maar.h
+++ b/arch/mips/include/asm/maar.h
@@ -66,6 +66,15 @@
 }
 
 /**
+ * maar_init() - initialise MAARs
+ *
+ * Performs initialisation of MAARs for the current CPU, making use of the
+ * platform's implementation of platform_maar_init where necessary and
+ * duplicating the setup it provides on secondary CPUs.
+ */
+extern void maar_init(void);
+
+/**
  * struct maar_config - MAAR configuration data
  * @lower:	The lowest address that the MAAR pair will affect. Must be
  *		aligned to a 2^16 byte boundary.
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index d75b75e..1f1927a 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -194,6 +194,7 @@
 BUILD_CM_R_(gic_status,		MIPS_CM_GCB_OFS + 0xd0)
 BUILD_CM_R_(cpc_status,		MIPS_CM_GCB_OFS + 0xf0)
 BUILD_CM_RW(l2_config,		MIPS_CM_GCB_OFS + 0x130)
+BUILD_CM_RW(sys_config2,	MIPS_CM_GCB_OFS + 0x150)
 
 /* Core Local & Core Other register accessor functions */
 BUILD_CM_Cx_RW(reset_release,	0x00)
@@ -316,6 +317,10 @@
 #define CM_GCR_L2_CONFIG_ASSOC_SHF		0
 #define CM_GCR_L2_CONFIG_ASSOC_MSK		(_ULCAST_(0xff) << 0)
 
+/* GCR_SYS_CONFIG2 register fields */
+#define CM_GCR_SYS_CONFIG2_MAXVPW_SHF		0
+#define CM_GCR_SYS_CONFIG2_MAXVPW_MSK		(_ULCAST_(0xf) << 0)
+
 /* GCR_Cx_COHERENCE register fields */
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF	0
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK	(_ULCAST_(0xff) << 0)
@@ -405,4 +410,38 @@
 	return read_gcr_rev();
 }
 
+/**
+ * mips_cm_max_vp_width() - return the width in bits of VP indices
+ *
+ * Return: the width, in bits, of VP indices in fields that combine core & VP
+ * indices.
+ */
+static inline unsigned int mips_cm_max_vp_width(void)
+{
+	extern int smp_num_siblings;
+
+	if (mips_cm_revision() >= CM_REV_CM3)
+		return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
+
+	return smp_num_siblings;
+}
+
+/**
+ * mips_cm_vp_id() - calculate the hardware VP ID for a CPU
+ * @cpu: the CPU whose VP ID to calculate
+ *
+ * Hardware such as the GIC uses identifiers for VPs which may not match the
+ * CPU numbers used by Linux. This function calculates the hardware VP
+ * identifier corresponding to a given CPU.
+ *
+ * Return: the VP ID for the CPU.
+ */
+static inline unsigned int mips_cm_vp_id(unsigned int cpu)
+{
+	unsigned int core = cpu_data[cpu].core;
+	unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);
+
+	return (core * mips_cm_max_vp_width()) + vp;
+}
+
 #endif /* __MIPS_ASM_MIPS_CM_H__ */
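
A worked example of the calculation added above, with illustrative numbers not taken from the patch: on a pre-CM3 system with two hardware threads per core (smp_num_siblings == 2), the CPU on core 3, VP 1 gets hardware VP ID 3 * 2 + 1 = 7, which is the identifier hardware such as the GIC needs rather than the Linux CPU number.

static unsigned int vp_id_example(unsigned int core, unsigned int vp,
				  unsigned int vps_per_core)
{
	return (core * vps_per_core) + vp;	/* e.g. (3 * 2) + 1 == 7 */
}
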
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index d3cd8ea..c64781c 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -487,6 +487,8 @@
 
 /* Bits specific to the MIPS32/64 PRA.	*/
 #define MIPS_CONF_MT		(_ULCAST_(7) <<	 7)
+#define MIPS_CONF_MT_TLB	(_ULCAST_(1) <<  7)
+#define MIPS_CONF_MT_FTLB	(_ULCAST_(4) <<  7)
 #define MIPS_CONF_AR		(_ULCAST_(7) << 10)
 #define MIPS_CONF_AT		(_ULCAST_(3) << 13)
 #define MIPS_CONF_M		(_ULCAST_(1) << 31)
diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h
index 2a4c128..be52c21 100644
--- a/arch/mips/include/asm/netlogic/common.h
+++ b/arch/mips/include/asm/netlogic/common.h
@@ -57,8 +57,8 @@
 #include <asm/mach-netlogic/multi-node.h>
 
 struct irq_desc;
-void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc);
-void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc);
+void nlm_smp_function_ipi_handler(struct irq_desc *desc);
+void nlm_smp_resched_ipi_handler(struct irq_desc *desc);
 void nlm_smp_irq_init(int hwcpuid);
 void nlm_boot_secondary_cpus(void);
 int nlm_wakeup_secondary_cpus(void);
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 6cd69fd..a74e181 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -291,7 +291,7 @@
 	writel(mask, reg);
 }
 
-static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
+static void jz_gpio_irq_demux_handler(struct irq_desc *desc)
 {
 	uint32_t flag;
 	unsigned int gpio_irq;
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 571a8e6..09a51d0 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -410,16 +410,18 @@
 static inline unsigned int decode_config0(struct cpuinfo_mips *c)
 {
 	unsigned int config0;
-	int isa;
+	int isa, mt;
 
 	config0 = read_c0_config();
 
 	/*
 	 * Look for Standard TLB or Dual VTLB and FTLB
 	 */
-	if ((((config0 & MIPS_CONF_MT) >> 7) == 1) ||
-	    (((config0 & MIPS_CONF_MT) >> 7) == 4))
+	mt = config0 & MIPS_CONF_MT;
+	if (mt == MIPS_CONF_MT_TLB)
 		c->options |= MIPS_CPU_TLB;
+	else if (mt == MIPS_CONF_MT_FTLB)
+		c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB;
 
 	isa = (config0 & MIPS_CONF_AT) >> 13;
 	switch (isa) {
@@ -559,15 +561,18 @@
 	if (cpu_has_tlb) {
 		if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
 			c->options |= MIPS_CPU_TLBINV;
+
 		/*
-		 * This is a bit ugly. R6 has dropped that field from
-		 * config4 and the only valid configuration is VTLB+FTLB so
-		 * set a good value for mmuextdef for that case.
+		 * R6 has dropped the MMUExtDef field from config4.
+		 * On R6 the fields always describe the FTLB, and only if it is
+		 * present according to Config.MT.
 		 */
-		if (cpu_has_mips_r6)
+		if (!cpu_has_mips_r6)
+			mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+		else if (cpu_has_ftlb)
 			mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT;
 		else
-			mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+			mmuextdef = 0;
 
 		switch (mmuextdef) {
 		case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 35b8316..4795151 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -338,7 +338,7 @@
 		if (end <= reserved_end)
 			continue;
 #ifdef CONFIG_BLK_DEV_INITRD
-		/* mapstart should be after initrd_end */
+		/* Skip zones before initrd and initrd itself */
 		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
 			continue;
 #endif
@@ -371,6 +371,14 @@
 		max_low_pfn = PFN_DOWN(HIGHMEM_START);
 	}
 
+#ifdef CONFIG_BLK_DEV_INITRD
+	/*
+	 * mapstart should be after initrd_end
+	 */
+	if (initrd_end)
+		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
+#endif
+
 	/*
 	 * Initialize the boot-time allocator with low memory only.
 	 */
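
The new clamp above keeps mapstart, which is where the boot-time allocator places its bitmap, from landing on top of the initrd. A worked example with illustrative values, independent of the patch:

#define EX_PAGE_SHIFT	12
#define EX_PFN_UP(x)	(((x) + (1UL << EX_PAGE_SHIFT) - 1) >> EX_PAGE_SHIFT)

static unsigned long mapstart_example(unsigned long mapstart,
				      unsigned long initrd_end_phys)
{
	/* mirrors: mapstart = max(mapstart, PFN_UP(__pa(initrd_end)));
	 * e.g. initrd_end_phys == 0x01234567 forces mapstart >= 0x1235 */
	unsigned long initrd_pfn = EX_PFN_UP(initrd_end_phys);

	return mapstart > initrd_pfn ? mapstart : initrd_pfn;
}
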
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index a31896c..bd4385a 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -42,6 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/time.h>
 #include <asm/setup.h>
+#include <asm/maar.h>
 
 cpumask_t cpu_callin_map;		/* Bitmask of started secondaries */
 
@@ -157,6 +158,7 @@
 	mips_clockevent_init();
 	mp_ops->init_secondary();
 	cpu_report();
+	maar_init();
 
 	/*
 	 * XXX parity protection should be folded in here when it's converted
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index cd4c129..49ff3bf 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -55,6 +55,7 @@
 	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
 	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
+	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
 	{ "halt_wakeup",  VCPU_STAT(halt_wakeup),	 KVM_STAT_VCPU },
 	{NULL}
 };
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index f6c44dd..d6d07ad 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -64,6 +64,9 @@
 	}
 	if (memsize == 0)
 		memsize = 256;
+
+	loongson_sysconf.nr_uarts = 1;
+
 	pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
 #else
 	struct boot_params *boot_p;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 66d0f49..8770e61 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -44,6 +44,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
+#include <asm/maar.h>
 
 /*
  * We have up to 8 empty zeroed pages so we can map one of the right colour
@@ -252,6 +253,119 @@
 #endif
 }
 
+unsigned __weak platform_maar_init(unsigned num_pairs)
+{
+	struct maar_config cfg[BOOT_MEM_MAP_MAX];
+	unsigned i, num_configured, num_cfg = 0;
+	phys_addr_t skip;
+
+	for (i = 0; i < boot_mem_map.nr_map; i++) {
+		switch (boot_mem_map.map[i].type) {
+		case BOOT_MEM_RAM:
+		case BOOT_MEM_INIT_RAM:
+			break;
+		default:
+			continue;
+		}
+
+		skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
+
+		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
+		cfg[num_cfg].lower += skip;
+
+		cfg[num_cfg].upper = cfg[num_cfg].lower;
+		cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
+		cfg[num_cfg].upper -= skip;
+
+		cfg[num_cfg].attrs = MIPS_MAAR_S;
+		num_cfg++;
+	}
+
+	num_configured = maar_config(cfg, num_cfg, num_pairs);
+	if (num_configured < num_cfg)
+		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
+			num_pairs, num_cfg);
+
+	return num_configured;
+}
+
+void maar_init(void)
+{
+	unsigned num_maars, used, i;
+	phys_addr_t lower, upper, attr;
+	static struct {
+		struct maar_config cfgs[3];
+		unsigned used;
+	} recorded = { { { 0 } }, 0 };
+
+	if (!cpu_has_maar)
+		return;
+
+	/* Detect the number of MAARs */
+	write_c0_maari(~0);
+	back_to_back_c0_hazard();
+	num_maars = read_c0_maari() + 1;
+
+	/* MAARs should be in pairs */
+	WARN_ON(num_maars % 2);
+
+	/* Set MAARs using values we recorded already */
+	if (recorded.used) {
+		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
+		BUG_ON(used != recorded.used);
+	} else {
+		/* Configure the required MAARs */
+		used = platform_maar_init(num_maars / 2);
+	}
+
+	/* Disable any further MAARs */
+	for (i = (used * 2); i < num_maars; i++) {
+		write_c0_maari(i);
+		back_to_back_c0_hazard();
+		write_c0_maar(0);
+		back_to_back_c0_hazard();
+	}
+
+	if (recorded.used)
+		return;
+
+	pr_info("MAAR configuration:\n");
+	for (i = 0; i < num_maars; i += 2) {
+		write_c0_maari(i);
+		back_to_back_c0_hazard();
+		upper = read_c0_maar();
+
+		write_c0_maari(i + 1);
+		back_to_back_c0_hazard();
+		lower = read_c0_maar();
+
+		attr = lower & upper;
+		lower = (lower & MIPS_MAAR_ADDR) << 4;
+		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
+
+		pr_info("  [%d]: ", i / 2);
+		if (!(attr & MIPS_MAAR_V)) {
+			pr_cont("disabled\n");
+			continue;
+		}
+
+		pr_cont("%pa-%pa", &lower, &upper);
+
+		if (attr & MIPS_MAAR_S)
+			pr_cont(" speculate");
+
+		pr_cont("\n");
+
+		/* Record the setup for use on secondary CPUs */
+		if (used <= ARRAY_SIZE(recorded.cfgs)) {
+			recorded.cfgs[recorded.used].lower = lower;
+			recorded.cfgs[recorded.used].upper = upper;
+			recorded.cfgs[recorded.used].attrs = attr;
+			recorded.used++;
+		}
+	}
+}
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 int page_is_ram(unsigned long pagenr)
 {
@@ -334,69 +448,6 @@
 #endif
 }
 
-unsigned __weak platform_maar_init(unsigned num_pairs)
-{
-	struct maar_config cfg[BOOT_MEM_MAP_MAX];
-	unsigned i, num_configured, num_cfg = 0;
-	phys_addr_t skip;
-
-	for (i = 0; i < boot_mem_map.nr_map; i++) {
-		switch (boot_mem_map.map[i].type) {
-		case BOOT_MEM_RAM:
-		case BOOT_MEM_INIT_RAM:
-			break;
-		default:
-			continue;
-		}
-
-		skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
-
-		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-		cfg[num_cfg].lower += skip;
-
-		cfg[num_cfg].upper = cfg[num_cfg].lower;
-		cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
-		cfg[num_cfg].upper -= skip;
-
-		cfg[num_cfg].attrs = MIPS_MAAR_S;
-		num_cfg++;
-	}
-
-	num_configured = maar_config(cfg, num_cfg, num_pairs);
-	if (num_configured < num_cfg)
-		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
-			num_pairs, num_cfg);
-
-	return num_configured;
-}
-
-static void maar_init(void)
-{
-	unsigned num_maars, used, i;
-
-	if (!cpu_has_maar)
-		return;
-
-	/* Detect the number of MAARs */
-	write_c0_maari(~0);
-	back_to_back_c0_hazard();
-	num_maars = read_c0_maari() + 1;
-
-	/* MAARs should be in pairs */
-	WARN_ON(num_maars % 2);
-
-	/* Configure the required MAARs */
-	used = platform_maar_init(num_maars / 2);
-
-	/* Disable any further MAARs */
-	for (i = (used * 2); i < num_maars; i++) {
-		write_c0_maari(i);
-		back_to_back_c0_hazard();
-		write_c0_maar(0);
-		back_to_back_c0_hazard();
-	}
-}
-
 void __init mem_init(void)
 {
 #ifdef CONFIG_HIGHMEM
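
Taken together with the smp.c hunk above, the rework turns maar_init() into a record-and-replay helper: the first call computes and programs the MAAR pairs, remembers up to three of them, and every secondary CPU entering start_secondary() replays the recorded values so all CPUs end up with an identical MAAR setup. A much-simplified sketch of that idea (all names here are illustrative, not from the patch):

struct maar_pair_sketch { unsigned long lower, upper, attrs; };

static struct maar_pair_sketch recorded_pairs[3];
static unsigned int recorded_count;

static void maar_record_or_replay(const struct maar_pair_sketch *computed,
				  unsigned int n,
				  void (*program)(const struct maar_pair_sketch *p))
{
	unsigned int i;

	if (!recorded_count)				/* boot CPU: remember the result */
		for (i = 0; i < n && i < 3; i++)
			recorded_pairs[recorded_count++] = computed[i];

	for (i = 0; i < recorded_count; i++)		/* every CPU: program the hardware */
		program(&recorded_pairs[i]);
}
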
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 0c4a133..77cb273 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -1251,7 +1251,7 @@
 		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
 
 	fp->bpf_func = (void *)ctx.target;
-	fp->jited = true;
+	fp->jited = 1;
 
 out:
 	kfree(ctx.offsets);
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
index e927260..dabf417 100644
--- a/arch/mips/net/bpf_jit_asm.S
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -64,8 +64,20 @@
 	PTR_ADDU t1, $r_skb_data, offset
 	lw	$r_A, 0(t1)
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
 	wsbh	t0, $r_A
 	rotr	$r_A, t0, 16
+# else
+	sll	t0, $r_A, 24
+	srl	t1, $r_A, 24
+	srl	t2, $r_A, 8
+	or	t0, t0, t1
+	andi	t2, t2, 0xff00
+	andi	t1, $r_A, 0xff00
+	or	t0, t0, t2
+	sll	t1, t1, 8
+	or	$r_A, t0, t1
+# endif
 #endif
 	jr	$r_ra
 	 move	$r_ret, zero
@@ -80,8 +92,16 @@
 	PTR_ADDU t1, $r_skb_data, offset
 	lh	$r_A, 0(t1)
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
 	wsbh	t0, $r_A
 	seh	$r_A, t0
+# else
+	sll	t0, $r_A, 24
+	andi	t1, $r_A, 0xff00
+	sra	t0, t0, 16
+	srl	t1, t1, 8
+	or	$r_A, t0, t1
+# endif
 #endif
 	jr	$r_ra
 	 move	$r_ret, zero
@@ -148,23 +168,47 @@
 NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
 	bpf_slow_path_common(4)
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
 	wsbh	t0, $r_s0
 	jr	$r_ra
 	 rotr	$r_A, t0, 16
-#endif
+# else
+	sll	t0, $r_s0, 24
+	srl	t1, $r_s0, 24
+	srl	t2, $r_s0, 8
+	or	t0, t0, t1
+	andi	t2, t2, 0xff00
+	andi	t1, $r_s0, 0xff00
+	or	t0, t0, t2
+	sll	t1, t1, 8
 	jr	$r_ra
-	move	$r_A, $r_s0
+	 or	$r_A, t0, t1
+# endif
+#else
+	jr	$r_ra
+	 move	$r_A, $r_s0
+#endif
 
 	END(bpf_slow_path_word)
 
 NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
 	bpf_slow_path_common(2)
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
 	jr	$r_ra
 	 wsbh	$r_A, $r_s0
-#endif
+# else
+	sll	t0, $r_s0, 8
+	andi	t1, $r_s0, 0xff00
+	andi	t0, t0, 0xff00
+	srl	t1, t1, 8
+	jr	$r_ra
+	 or	$r_A, t0, t1
+# endif
+#else
 	jr	$r_ra
 	 move	$r_A, $r_s0
+#endif
 
 	END(bpf_slow_path_half)
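
The wsbh/rotr and wsbh/seh sequences only exist on MIPS32/64 R2 and later, so the hunks above open-code the swaps for older cores. For reference, a C sketch of what the pre-R2 word-swap sequence (sll/srl/andi/or) computes; this is a plain 32-bit byte reversal and is not code from the patch:

static inline unsigned int swab32_sketch(unsigned int a)
{
	unsigned int t0 = (a << 24) | (a >> 24);	/* swap bytes 0 and 3 */
	unsigned int t2 = (a >> 8) & 0xff00U;		/* byte 2 -> byte 1 */
	unsigned int t1 = (a & 0xff00U) << 8;		/* byte 1 -> byte 2 */

	return t0 | t2 | t1;
}
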
 
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 0136b4f..10d86d5 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -82,7 +82,7 @@
 }
 
 /* IRQ_IPI_SMP_FUNCTION Handler */
-void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc)
+void nlm_smp_function_ipi_handler(struct irq_desc *desc)
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 	clear_c0_eimr(irq);
@@ -92,7 +92,7 @@
 }
 
 /* IRQ_IPI_SMP_RESCHEDULE  handler */
-void nlm_smp_resched_ipi_handler(unsigned int __irq, struct irq_desc *desc)
+void nlm_smp_resched_ipi_handler(struct irq_desc *desc)
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 	clear_c0_eimr(irq);
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
index f8d0acb..b4fa641 100644
--- a/arch/mips/pci/pci-ar2315.c
+++ b/arch/mips/pci/pci-ar2315.c
@@ -318,7 +318,7 @@
 	return 0;
 }
 
-static void ar2315_pci_irq_handler(unsigned irq, struct irq_desc *desc)
+static void ar2315_pci_irq_handler(struct irq_desc *desc)
 {
 	struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc);
 	u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) &
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index ad35a5e..7db963d 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -226,7 +226,7 @@
 	.write	= ar71xx_pci_write_config,
 };
 
-static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ar71xx_pci_irq_handler(struct irq_desc *desc)
 {
 	struct ar71xx_pci_controller *apc;
 	void __iomem *base = ath79_reset_base;
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 907d11d..2013dad 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -225,7 +225,7 @@
 	.write	= ar724x_pci_write,
 };
 
-static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ar724x_pci_irq_handler(struct irq_desc *desc)
 {
 	struct ar724x_pci_controller *apc;
 	void __iomem *base;
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index 53c8efa..ed6732f9 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -129,7 +129,7 @@
 	rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA);
 }
 
-static void rt3883_pci_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void rt3883_pci_irq_handler(struct irq_desc *desc)
 {
 	struct rt3883_pci_controller *rpc;
 	u32 pending;
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index c6996cf..b8a0bf5 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -311,6 +311,12 @@
 
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
+	struct pci_dev *dev = bus->self;
+
+	if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
+	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+		pci_read_bridge_bases(bus);
+	}
 }
 
 EXPORT_SYMBOL(PCIBIOS_MIN_IO);
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 8c624a8..4cf77f3 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -96,7 +96,7 @@
 	return CP0_LEGACY_COMPARE_IRQ;
 }
 
-static void ralink_intc_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ralink_intc_irq_handler(struct irq_desc *desc)
 {
 	u32 pending = rt_intc_r32(INTC_REG_STATUS0);
 
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index deaa893..3dfe2d3 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -324,6 +324,7 @@
 	struct pci_dev *dev;
 
 	if (bus->self) {
+		pci_read_bridge_bases(bus);
 		pcibios_fixup_bridge_resources(bus->self);
 	}
 
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 73eddda..4eec430 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -28,6 +28,9 @@
 endif
 ifdef CONFIG_CPU_BIG_ENDIAN
 BOOTCFLAGS	+= -mbig-endian
+else
+BOOTCFLAGS	+= -mlittle-endian
+BOOTCFLAGS	+= $(call cc-option,-mabi=elfv2)
 endif
 
 BOOTAFLAGS	:= -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
index 426bf41..5f51b7b 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
@@ -224,10 +224,12 @@
 
 /include/ "pq3-etsec2-0.dtsi"
 	enet0: enet0_grp2: ethernet@b0000 {
+		fsl,wake-on-filer;
 	};
 
 /include/ "pq3-etsec2-1.dtsi"
 	enet1: enet1_grp2: ethernet@b1000 {
+		fsl,wake-on-filer;
 	};
 
 	global-utilities@e0000 {
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 98eebbf..827a38d 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -44,6 +44,7 @@
 #ifdef CONFIG_KVM_MMIO
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #endif
+#define KVM_HALT_POLL_NS_DEFAULT 500000
 
 /* These values are internal and can be increased later */
 #define KVM_NR_IRQCHIPS          1
@@ -108,6 +109,7 @@
 	u32 dec_exits;
 	u32 ext_intr_exits;
 	u32 halt_successful_poll;
+	u32 halt_attempted_poll;
 	u32 halt_wakeup;
 	u32 dbell_exits;
 	u32 gdbell_exits;
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
index 25784cc..1e155ca 100644
--- a/arch/powerpc/include/asm/qe_ic.h
+++ b/arch/powerpc/include/asm/qe_ic.h
@@ -59,14 +59,14 @@
 
 #ifdef CONFIG_QUICC_ENGINE
 void qe_ic_init(struct device_node *node, unsigned int flags,
-		void (*low_handler)(unsigned int irq, struct irq_desc *desc),
-		void (*high_handler)(unsigned int irq, struct irq_desc *desc));
+		void (*low_handler)(struct irq_desc *desc),
+		void (*high_handler)(struct irq_desc *desc));
 unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
 unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
 #else
 static inline void qe_ic_init(struct device_node *node, unsigned int flags,
-		void (*low_handler)(unsigned int irq, struct irq_desc *desc),
-		void (*high_handler)(unsigned int irq, struct irq_desc *desc))
+		void (*low_handler)(struct irq_desc *desc),
+		void (*high_handler)(struct irq_desc *desc))
 {}
 static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
 { return 0; }
@@ -78,8 +78,7 @@
 int qe_ic_set_priority(unsigned int virq, unsigned int priority);
 int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
 
-static inline void qe_ic_cascade_low_ipic(unsigned int irq,
-					  struct irq_desc *desc)
+static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
 {
 	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
 	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
@@ -88,8 +87,7 @@
 		generic_handle_irq(cascade_irq);
 }
 
-static inline void qe_ic_cascade_high_ipic(unsigned int irq,
-					   struct irq_desc *desc)
+static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
 {
 	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
 	unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
@@ -98,8 +96,7 @@
 		generic_handle_irq(cascade_irq);
 }
 
-static inline void qe_ic_cascade_low_mpic(unsigned int irq,
-					  struct irq_desc *desc)
+static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
 {
 	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
 	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
@@ -111,8 +108,7 @@
 	chip->irq_eoi(&desc->irq_data);
 }
 
-static inline void qe_ic_cascade_high_mpic(unsigned int irq,
-					   struct irq_desc *desc)
+static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
 {
 	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
 	unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
@@ -124,8 +120,7 @@
 	chip->irq_eoi(&desc->irq_data);
 }
 
-static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
-					    struct irq_desc *desc)
+static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
 {
 	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
 	unsigned int cascade_irq;
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 71f2b3f..126d0c4 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -368,3 +368,5 @@
 SYSCALL_SPU(bpf)
 COMPAT_SYS(execveat)
 PPC64ONLY(switch_endian)
+SYSCALL_SPU(userfaultfd)
+SYSCALL_SPU(membarrier)
diff --git a/arch/powerpc/include/asm/tsi108_pci.h b/arch/powerpc/include/asm/tsi108_pci.h
index 5653d7c..ae59d5b 100644
--- a/arch/powerpc/include/asm/tsi108_pci.h
+++ b/arch/powerpc/include/asm/tsi108_pci.h
@@ -39,7 +39,7 @@
 
 extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary);
 extern void tsi108_pci_int_init(struct device_node *node);
-extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc);
+extern void tsi108_irq_cascade(struct irq_desc *desc);
 extern void tsi108_clear_pci_cfg_error(void);
 
 #endif				/*  _ASM_POWERPC_TSI108_PCI_H */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index f4f8b66..13411be 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls		364
+#define __NR_syscalls		366
 
 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index e4aa173..6337738 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -386,5 +386,7 @@
 #define __NR_bpf		361
 #define __NR_execveat		362
 #define __NR_switch_endian	363
+#define __NR_userfaultfd	364
+#define __NR_membarrier		365
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 4509603..290559d 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -441,7 +441,7 @@
 
 		chip = irq_data_get_irq_chip(data);
 
-		cpumask_and(mask, data->affinity, map);
+		cpumask_and(mask, irq_data_get_affinity_mask(data), map);
 		if (cpumask_any(mask) >= nr_cpu_ids) {
 			pr_warn("Breaking affinity for irq %i\n", irq);
 			cpumask_copy(mask, map);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index a1d0632..7587b2a 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1032,7 +1032,13 @@
 
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
-	/* Fixup the bus */
+	/* When called from the generic PCI probe, read PCI<->PCI bridge
+	 * bases. This is -not- called when generating the PCI tree from
+	 * the OF device-tree.
+	 */
+	pci_read_bridge_bases(bus);
+
+	/* Now fixup the bus */
 	pcibios_setup_bus_self(bus);
 
 	/* Now fixup devices on that bus */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index bb02e9f..ad8c9db 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -38,6 +38,7 @@
 #include <asm/udbg.h>
 #include <asm/mmu_context.h>
 #include <asm/epapr_hcalls.h>
+#include <asm/code-patching.h>
 
 #define DBG(fmt...)
 
@@ -109,6 +110,8 @@
  * This is called very early on the boot process, after a minimal
  * MMU environment has been set up but before MMU_init is called.
  */
+extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
+
 notrace void __init machine_init(u64 dt_ptr)
 {
 	lockdep_init();
@@ -116,6 +119,9 @@
 	/* Enable early debugging if any specified (see udbg.h) */
 	udbg_early_init();
 
+	patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
+	patch_instruction(&memset_nocache_branch, PPC_INST_NOP);
+
 	/* Do some early initialization based on the flat device tree */
 	early_init_devtree(__va(dt_ptr));
 
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index d75bf32..099c79d 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -53,6 +53,7 @@
 	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
 	{ "queue_intr",  VCPU_STAT(queue_intr) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
+	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "pf_storage",  VCPU_STAT(pf_storage) },
 	{ "sp_storage",  VCPU_STAT(sp_storage) },
@@ -828,12 +829,15 @@
 	unsigned long size = kvmppc_get_gpr(vcpu, 4);
 	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
 	u64 buf;
+	int srcu_idx;
 	int ret;
 
 	if (!is_power_of_2(size) || (size > sizeof(buf)))
 		return H_TOO_HARD;
 
+	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
 	if (ret != 0)
 		return H_TOO_HARD;
 
@@ -868,6 +872,7 @@
 	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
 	unsigned long val = kvmppc_get_gpr(vcpu, 6);
 	u64 buf;
+	int srcu_idx;
 	int ret;
 
 	switch (size) {
@@ -891,7 +896,9 @@
 		return H_TOO_HARD;
 	}
 
+	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
 	if (ret != 0)
 		return H_TOO_HARD;
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 9754e68..2280497 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2692,9 +2692,13 @@
 
 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
 	       (vc->vcore_state == VCORE_RUNNING ||
-		vc->vcore_state == VCORE_EXITING))
+		vc->vcore_state == VCORE_EXITING ||
+		vc->vcore_state == VCORE_PIGGYBACK))
 		kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
 
+	if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
+		kvmppc_vcore_end_preempt(vc);
+
 	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
 		kvmppc_remove_runnable(vc, vcpu);
 		vcpu->stat.signal_exits++;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 2273dca..b98889e 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1257,6 +1257,7 @@
 	bl	kvmhv_accumulate_time
 #endif
 
+	mr 	r3, r12
 	/* Increment exit count, poke other threads to exit */
 	bl	kvmhv_commence_exit
 	nop
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ae458f0..fd58751 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -63,6 +63,7 @@
 	{ "dec",        VCPU_STAT(dec_exits) },
 	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
+	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "doorbell", VCPU_STAT(dbell_exits) },
 	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 2ef50c6..c44df2d 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -73,6 +73,10 @@
  * Use dcbz on the complete cache lines in the destination
  * to set them to zero.  This requires that the destination
  * area is cacheable.  -- paulus
+ *
+ * During early init, cache might not be active yet, so dcbz cannot be used.
+ * We therefore skip the optimised block that uses dcbz. This jump is
+ * replaced by a nop once the cache is active. This is done in machine_init().
  */
 _GLOBAL(memset)
 	rlwimi	r4,r4,8,16,23
@@ -88,6 +92,8 @@
 	subf	r6,r0,r6
 	cmplwi	0,r4,0
 	bne	2f	/* Use normal procedure if r4 is not zero */
+_GLOBAL(memset_nocache_branch)
+	b	2f	/* Skip optimised block until cache is enabled */
 
 	clrlwi	r7,r6,32-LG_CACHELINE_BYTES
 	add	r8,r7,r5
@@ -128,6 +134,10 @@
  * the destination area is cacheable.
  * We only use this version if the source and dest don't overlap.
  * -- paulus.
+ *
+ * During early init, cache might not be active yet, so dcbz cannot be used.
+ * We therefore jump to generic_memcpy, which doesn't use dcbz. This jump is
+ * replaced by a nop once the cache is active. This is done in machine_init().
  */
 _GLOBAL(memmove)
 	cmplw	0,r3,r4
@@ -135,6 +145,7 @@
 	/* fall through */
 
 _GLOBAL(memcpy)
+	b	generic_memcpy
 	add	r7,r3,r5		/* test if the src & dst overlap */
 	add	r8,r4,r5
 	cmplw	0,r4,r7
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 43dafb9..4d87122 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -85,7 +85,6 @@
 	BUG_ON(index >= 4096);
 
 	vpn = hpt_vpn(ea, vsid, ssize);
-	hash = hpt_hash(vpn, shift, ssize);
 	hpte_slot_array = get_hpte_slot_array(pmdp);
 	if (psize == MMU_PAGE_4K) {
 		/*
@@ -101,6 +100,7 @@
 	valid = hpte_valid(hpte_slot_array, index);
 	if (valid) {
 		/* update the hpte bits */
+		hash = hpt_hash(vpn, shift, ssize);
 		hidx =  hpte_hash_index(hpte_slot_array, index);
 		if (hidx & _PTEIDX_SECONDARY)
 			hash = ~hash;
@@ -126,6 +126,7 @@
 	if (!valid) {
 		unsigned long hpte_group;
 
+		hash = hpt_hash(vpn, shift, ssize);
 		/* insert new entry */
 		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
 		new_pmd |= _PAGE_HASHPTE;
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 17cea18..0478216 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -679,7 +679,7 @@
 		((u64 *)image)[1] = local_paca->kernel_toc;
 #endif
 		fp->bpf_func = (void *)image;
-		fp->jited = true;
+		fp->jited = 1;
 	}
 out:
 	kfree(addrs);
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index 11090ab..0035d14 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -104,9 +104,10 @@
 	return irq_linear_revmap(cpld_pic_host, cpld_irq);
 }
 
-static void
-cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
+static void cpld_pic_cascade(struct irq_desc *desc)
 {
+	unsigned int irq;
+
 	irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
 		&cpld_regs->pci_mask);
 	if (irq != NO_IRQ) {
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 32cae33..8fb9548 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -80,7 +80,7 @@
 	.irq_mask_ack = media5200_irq_mask,
 };
 
-void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
+static void media5200_irq_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int sub_virq, val;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 6301662..78ac19a 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -191,7 +191,7 @@
 	.irq_set_type = mpc52xx_gpt_irq_set_type,
 };
 
-void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc)
+static void mpc52xx_gpt_irq_cascade(struct irq_desc *desc)
 {
 	struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc);
 	int sub_virq;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 2944bc8..4fe2074 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -196,7 +196,7 @@
 	ctrl_reg |= (type << (22 - (l2irq * 2)));
 	out_be32(&intr->ctrl, ctrl_reg);
 
-	__irq_set_handler_locked(d->irq, handler);
+	irq_set_handler_locked(d, handler);
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 74861a7..60e89fc 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -78,7 +78,7 @@
 	.irq_disable = pq2ads_pci_mask_irq
 };
 
-static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void pq2ads_pci_irq_demux(struct irq_desc *desc)
 {
 	struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc);
 	u32 stat, mask, pend;
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 7bfb9b1..23791de 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -49,7 +49,7 @@
 	return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL);
 }
 #ifdef CONFIG_CPM2
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
+static void cpm2_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int cascade_irq;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index b0753e2..5ac70de 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -192,8 +192,7 @@
 }
 
 #ifdef CONFIG_PPC_I8259
-static void mpc85xx_8259_cascade_handler(unsigned int irq,
-					 struct irq_desc *desc)
+static void mpc85xx_8259_cascade_handler(struct irq_desc *desc)
 {
 	unsigned int cascade_irq = i8259_irq();
 
@@ -202,7 +201,7 @@
 		generic_handle_irq(cascade_irq);
 
 	/* check for any interrupts from the shared IRQ line */
-	handle_fasteoi_irq(irq, desc);
+	handle_fasteoi_irq(desc);
 }
 
 static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id)
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index ffdf021..f858306 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -46,7 +46,7 @@
 #endif
 
 #ifdef CONFIG_PPC_I8259
-static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void mpc85xx_8259_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 55a9682..b02d6a5 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -91,9 +91,10 @@
 			(irq_hw_number_t)i);
 }
 
-void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc)
+static void socrates_fpga_pic_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
 	unsigned int cascade_irq;
 
 	/*
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c
index d5b98c0..845defa 100644
--- a/arch/powerpc/platforms/86xx/pic.c
+++ b/arch/powerpc/platforms/86xx/pic.c
@@ -17,7 +17,7 @@
 #include <asm/i8259.h>
 
 #ifdef CONFIG_PPC_I8259
-static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void mpc86xx_8259_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index d303774..c289fc7 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -214,7 +214,7 @@
 	panic("Restart failed\n");
 }
 
-static void cpm_cascade(unsigned int irq, struct irq_desc *desc)
+static void cpm_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int cascade_irq = cpm_get_irq();
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 306888a..e0e68a1 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -93,7 +93,7 @@
 	dcr_write(msic->dcr_host, dcr_n, val);
 }
 
-static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
+static void axon_msi_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct axon_msic *msic = irq_desc_get_handler_data(desc);
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index a15f1ef..9f609fc 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -99,11 +99,12 @@
 {
 }
 
-static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
+static void iic_ioexc_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct cbe_iic_regs __iomem *node_iic =
 		(void __iomem *)irq_desc_get_handler_data(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
 	unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
 	unsigned long bits, ack;
 	int cascade;
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 1f72f4a..9d27de6 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -199,7 +199,7 @@
 	.xlate = spider_host_xlate,
 };
 
-static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc)
+static void spider_irq_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct spider_pic *pic = irq_desc_get_handler_data(desc);
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 15ebc4e..987d1b8 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -363,7 +363,7 @@
 	if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
 }
 
-static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void chrp_8259_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 9dd154d..9b79757 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -120,8 +120,7 @@
 	return irq_linear_revmap(h, irq);
 }
 
-static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
-				      struct irq_desc *desc)
+static void hlwd_pic_irq_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct irq_domain *irq_domain = irq_desc_get_handler_data(desc);
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
index 1613303..8f65aa3 100644
--- a/arch/powerpc/platforms/embedded6xx/mvme5100.c
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -42,7 +42,7 @@
 static phys_addr_t pci_membase;
 static u_char *restart;
 
-static void mvme5100_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void mvme5100_8259_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c
index e66ef19..b304a9f 100644
--- a/arch/powerpc/platforms/pasemi/msi.c
+++ b/arch/powerpc/platforms/pasemi/msi.c
@@ -63,6 +63,7 @@
 static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
 {
 	struct msi_desc *entry;
+	irq_hw_number_t hwirq;
 
 	pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
 
@@ -70,10 +71,10 @@
 		if (entry->irq == NO_IRQ)
 			continue;
 
+		hwirq = virq_to_hw(entry->irq);
 		irq_set_msi_desc(entry->irq, NULL);
-		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
-				       virq_to_hw(entry->irq), ALLOC_CHUNK);
 		irq_dispose_mapping(entry->irq);
+		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
 	}
 
 	return;
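
The MSI teardown hunks here and in the powernv, fsl_msi, mpic_u3msi and ppc4xx_msi files below share one fix: capture the hardware irq number with virq_to_hw() while the virq mapping still exists, dispose of the mapping, and only then return the hwirq to the allocation bitmap, so the hwirq is not freed (and potentially reused) while its virq mapping still exists. A minimal sketch of that ordering, reusing the pasemi names from the hunk above:

	for_each_pci_msi_entry(entry, pdev) {
		if (entry->irq == NO_IRQ)
			continue;
		hwirq = virq_to_hw(entry->irq);		/* read before the mapping goes away */
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);	/* unmap first ... */
		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);	/* ... then free */
	}
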
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 2927cd5..414fd1a 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2049,9 +2049,23 @@
 	struct iommu_table *tbl = NULL;
 	long rc;
 
+	/*
+	 * crashkernel= specifies the kdump kernel's maximum memory at
+	 * some offset and there is no guaranteed the result is a power
+	 * some offset and there is no guarantee that the result is a power
+	 */
+	const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());
+
+	/*
+	 * In memory constrained environments, e.g. kdump kernel, the
+	 * DMA window can be larger than available memory, which will
+	 * cause errors later.
+	 */
+	const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory);
+
 	rc = pnv_pci_ioda2_create_table(&pe->table_group, 0,
 			IOMMU_PAGE_SHIFT_4K,
-			pe->table_group.tce32_size,
+			window_size,
 			POWERNV_IOMMU_DEFAULT_LEVELS, &tbl);
 	if (rc) {
 		pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 9b2480b..f2dd772 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -99,6 +99,7 @@
 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 	struct pnv_phb *phb = hose->private_data;
 	struct msi_desc *entry;
+	irq_hw_number_t hwirq;
 
 	if (WARN_ON(!phb))
 		return;
@@ -106,10 +107,10 @@
 	for_each_pci_msi_entry(entry, pdev) {
 		if (entry->irq == NO_IRQ)
 			continue;
+		hwirq = virq_to_hw(entry->irq);
 		irq_set_msi_desc(entry->irq, NULL);
-		msi_bitmap_free_hwirqs(&phb->msi_bmp,
-			virq_to_hw(entry->irq) - phb->msi_base, 1);
 		irq_dispose_mapping(entry->irq);
+		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
 	}
 }
 #endif /* CONFIG_PCI_MSI */
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 47d9cebe..db17827 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -422,8 +422,10 @@
 
 	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
 	of_node_put(parent);
-	if (!dn)
+	if (!dn) {
+		dlpar_release_drc(drc_index);
 		return -EINVAL;
+	}
 
 	rc = dlpar_attach_node(dn);
 	if (rc) {
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 39a74fa..9a83eb7 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -111,7 +111,7 @@
 		fwnmi_active = 1;
 }
 
-static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void pseries_8259_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index a11bd1d..9e86074 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -155,9 +155,9 @@
 
 	irqd_set_trigger_type(d, flow_type);
 	if (flow_type & IRQ_TYPE_LEVEL_LOW)
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	/* internal IRQ senses are LEVEL_LOW
 	 * EXT IRQ and Port C IRQ senses are programmable
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 5916da1..48a576a 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -128,15 +128,16 @@
 {
 	struct msi_desc *entry;
 	struct fsl_msi *msi_data;
+	irq_hw_number_t hwirq;
 
 	for_each_pci_msi_entry(entry, pdev) {
 		if (entry->irq == NO_IRQ)
 			continue;
+		hwirq = virq_to_hw(entry->irq);
 		msi_data = irq_get_chip_data(entry->irq);
 		irq_set_msi_desc(entry->irq, NULL);
-		msi_bitmap_free_hwirqs(&msi_data->bitmap,
-				       virq_to_hw(entry->irq), 1);
 		irq_dispose_mapping(entry->irq);
+		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
 	}
 
 	return;
diff --git a/arch/powerpc/sysdev/ge/ge_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c
index 2bcb78b..d57b775 100644
--- a/arch/powerpc/sysdev/ge/ge_pic.c
+++ b/arch/powerpc/sysdev/ge/ge_pic.c
@@ -91,7 +91,7 @@
  * should be masked out.
  */
 
-void gef_pic_cascade(unsigned int irq, struct irq_desc *desc)
+static void gef_pic_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned int cascade_irq;
diff --git a/arch/powerpc/sysdev/ge/ge_pic.h b/arch/powerpc/sysdev/ge/ge_pic.h
index 908dbd9..5bf7e4b 100644
--- a/arch/powerpc/sysdev/ge/ge_pic.h
+++ b/arch/powerpc/sysdev/ge/ge_pic.h
@@ -1,8 +1,6 @@
 #ifndef __GEF_PIC_H__
 #define __GEF_PIC_H__
 
-
-void gef_pic_cascade(unsigned int, struct irq_desc *);
 unsigned int gef_pic_get_irq(void);
 void gef_pic_init(struct device_node *);
 
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 6b2b689..b1297ab 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -624,10 +624,10 @@
 
 	irqd_set_trigger_type(d, flow_type);
 	if (flow_type & IRQ_TYPE_LEVEL_LOW)  {
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		d->chip = &ipic_level_irq_chip;
 	} else {
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		d->chip = &ipic_edge_irq_chip;
 	}
 
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index d93a78be..9a42397 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -55,7 +55,7 @@
 		unsigned int siel = in_be32(&siu_reg->sc_siel);
 		siel |= mpc8xx_irqd_to_bit(d);
 		out_be32(&siu_reg->sc_siel, siel);
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	}
 	return 0;
 }
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 97a8ae8..537e5db 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1181,7 +1181,7 @@
 }
 
 /* IRQ handler for a secondary MPIC cascaded from another IRQ controller */
-static void mpic_cascade(unsigned int irq, struct irq_desc *desc)
+static void mpic_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct mpic *mpic = irq_desc_get_handler_data(desc);
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 70fbd56..2cbc7e2 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -107,15 +107,16 @@
 static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
 {
 	struct msi_desc *entry;
+	irq_hw_number_t hwirq;
 
 	for_each_pci_msi_entry(entry, pdev) {
 		if (entry->irq == NO_IRQ)
 			continue;
 
+		hwirq = virq_to_hw(entry->irq);
 		irq_set_msi_desc(entry->irq, NULL);
-		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
-				       virq_to_hw(entry->irq), 1);
 		irq_dispose_mapping(entry->irq);
+		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
 	}
 
 	return;
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 24d0470..8fb8061 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -124,16 +124,17 @@
 {
 	struct msi_desc *entry;
 	struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+	irq_hw_number_t hwirq;
 
 	dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
 
 	for_each_pci_msi_entry(entry, dev) {
 		if (entry->irq == NO_IRQ)
 			continue;
+		hwirq = virq_to_hw(entry->irq);
 		irq_set_msi_desc(entry->irq, NULL);
-		msi_bitmap_free_hwirqs(&msi_data->bitmap,
-				virq_to_hw(entry->irq), 1);
 		irq_dispose_mapping(entry->irq);
+		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
 	}
 }
 
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 47b352e..fbcc1f8 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -311,8 +311,8 @@
 }
 
 void __init qe_ic_init(struct device_node *node, unsigned int flags,
-		void (*low_handler)(unsigned int irq, struct irq_desc *desc),
-		void (*high_handler)(unsigned int irq, struct irq_desc *desc))
+		       void (*low_handler)(struct irq_desc *desc),
+		       void (*high_handler)(struct irq_desc *desc))
 {
 	struct qe_ic *qe_ic;
 	struct resource res;
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 57b5447..379de95 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -428,7 +428,7 @@
 	init_pci_source();
 }
 
-void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc)
+void tsi108_irq_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned int cascade_irq = get_pci_source();
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index d773453..6893d8f 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -194,7 +194,7 @@
 	.xlate	= irq_domain_xlate_twocell,
 };
 
-void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
+static void uic_irq_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct irq_data *idata = irq_desc_get_irq_data(desc);
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
index 11ac964..27c936c 100644
--- a/arch/powerpc/sysdev/xics/ics-opal.c
+++ b/arch/powerpc/sysdev/xics/ics-opal.c
@@ -54,7 +54,7 @@
 	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
 		return;
 
-	server = xics_get_irq_server(d->irq, d->affinity, 0);
+	server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
 	server = ics_opal_mangle_server(server);
 
 	rc = opal_set_xive(hw_irq, server, DEFAULT_PRIORITY);
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
index d1c625c..3854dd4 100644
--- a/arch/powerpc/sysdev/xics/ics-rtas.c
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -47,7 +47,7 @@
 	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
 		return;
 
-	server = xics_get_irq_server(d->irq, d->affinity, 0);
+	server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
 
 	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server,
 				DEFAULT_PRIORITY);
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 43b8b27..0f52d79 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -222,7 +222,7 @@
 /*
  * Support code for cascading to 8259 interrupt controllers
  */
-static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void xilinx_i8259_cascade(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned int cascade_irq = i8259_irq();
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 1b0184a..92805d6 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -1,7 +1,6 @@
 # CONFIG_SWAP is not set
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
-CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_COMPAT_BRK is not set
@@ -54,10 +53,6 @@
 # CONFIG_MONWRITER is not set
 # CONFIG_S390_VMUR is not set
 # CONFIG_HID is not set
-CONFIG_MEMSTICK=y
-CONFIG_MEMSTICK_DEBUG=y
-CONFIG_MEMSTICK_UNSAFE_RESUME=y
-CONFIG_MSPRO_BLOCK=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 3d012e0..8ced426 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -35,6 +35,7 @@
  */
 #define KVM_NR_IRQCHIPS 1
 #define KVM_IRQCHIP_NUM_PINS 4096
+#define KVM_HALT_POLL_NS_DEFAULT 0
 
 #define SIGP_CTRL_C		0x80
 #define SIGP_CTRL_SCN_MASK	0x3f
@@ -210,6 +211,7 @@
 	u32 exit_validity;
 	u32 exit_instruction;
 	u32 halt_successful_poll;
+	u32 halt_attempted_poll;
 	u32 halt_wakeup;
 	u32 instruction_lctl;
 	u32 instruction_lctlg;
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 525cef7..02613ba 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -8,28 +8,8 @@
 
 #include <uapi/asm/unistd.h>
 
-
 #define __IGNORE_time
 
-/* Ignore system calls that are also reachable via sys_socketcall */
-#define __IGNORE_recvmmsg
-#define __IGNORE_sendmmsg
-#define __IGNORE_socket
-#define __IGNORE_socketpair
-#define __IGNORE_bind
-#define __IGNORE_connect
-#define __IGNORE_listen
-#define __IGNORE_accept4
-#define __IGNORE_getsockopt
-#define __IGNORE_setsockopt
-#define __IGNORE_getsockname
-#define __IGNORE_getpeername
-#define __IGNORE_sendto
-#define __IGNORE_sendmsg
-#define __IGNORE_recvfrom
-#define __IGNORE_recvmsg
-#define __IGNORE_shutdown
-
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_SYS_ALARM
 #define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 59d2bb4..a848adb 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -290,7 +290,26 @@
 #define __NR_s390_pci_mmio_write	352
 #define __NR_s390_pci_mmio_read		353
 #define __NR_execveat		354
-#define NR_syscalls 355
+#define __NR_userfaultfd	355
+#define __NR_membarrier		356
+#define __NR_recvmmsg		357
+#define __NR_sendmmsg		358
+#define __NR_socket		359
+#define __NR_socketpair		360
+#define __NR_bind		361
+#define __NR_connect		362
+#define __NR_listen		363
+#define __NR_accept4		364
+#define __NR_getsockopt		365
+#define __NR_setsockopt		366
+#define __NR_getsockname	367
+#define __NR_getpeername	368
+#define __NR_sendto		369
+#define __NR_sendmsg		370
+#define __NR_recvfrom		371
+#define __NR_recvmsg		372
+#define __NR_shutdown		373
+#define NR_syscalls 374
 
 /* 
  * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index eb46642..e0f9d27 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -48,6 +48,19 @@
 	struct ucontext32 uc;
 } rt_sigframe32;
 
+static inline void sigset_to_sigset32(unsigned long *set64,
+				      compat_sigset_word *set32)
+{
+	set32[0] = (compat_sigset_word) set64[0];
+	set32[1] = (compat_sigset_word)(set64[0] >> 32);
+}
+
+static inline void sigset32_to_sigset(compat_sigset_word *set32,
+				      unsigned long *set64)
+{
+	set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32);
+}
+
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 {
 	int err;
@@ -281,10 +294,12 @@
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
+	compat_sigset_t cset;
 	sigset_t set;
 
-	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
+	if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
 		goto badframe;
+	sigset32_to_sigset(cset.sig, set.sig);
 	set_current_blocked(&set);
 	save_fpu_regs();
 	if (restore_sigregs32(regs, &frame->sregs))
@@ -302,10 +317,12 @@
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
+	compat_sigset_t cset;
 	sigset_t set;
 
-	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+	if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset)))
 		goto badframe;
+	sigset32_to_sigset(cset.sig, set.sig);
 	set_current_blocked(&set);
 	if (compat_restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
@@ -377,7 +394,7 @@
 		return -EFAULT;
 
 	/* Create struct sigcontext32 on the signal stack */
-	memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32);
+	sigset_to_sigset32(set->sig, sc.oldmask);
 	sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
 	if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
 		return -EFAULT;
@@ -438,6 +455,7 @@
 static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
 			    struct pt_regs *regs)
 {
+	compat_sigset_t cset;
 	rt_sigframe32 __user *frame;
 	unsigned long restorer;
 	size_t frame_size;
@@ -485,11 +503,12 @@
 	store_sigregs();
 
 	/* Create ucontext on the signal stack. */
+	sigset_to_sigset32(set->sig, cset.sig);
 	if (__put_user(uc_flags, &frame->uc.uc_flags) ||
 	    __put_user(0, &frame->uc.uc_link) ||
 	    __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
 	    save_sigregs32(regs, &frame->uc.uc_mcontext) ||
-	    __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
+	    __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) ||
 	    save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
 		return -EFAULT;
 
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index f8498dd..09f1940 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -52,15 +52,13 @@
  * the regular system call wrappers.
  */
 #define COMPAT_SYSCALL_WRAPx(x, name, ...)					\
-	asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));		\
-	asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\
-	asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__))	\
-	{									\
-		return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__));	\
-	}
+asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));			\
+asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\
+asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__))	\
+{										\
+	return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__));		\
+}
 
-COMPAT_SYSCALL_WRAP1(exit, int, error_code);
-COMPAT_SYSCALL_WRAP1(close, unsigned int, fd);
 COMPAT_SYSCALL_WRAP2(creat, const char __user *, pathname, umode_t, mode);
 COMPAT_SYSCALL_WRAP2(link, const char __user *, oldname, const char __user *, newname);
 COMPAT_SYSCALL_WRAP1(unlink, const char __user *, pathname);
@@ -68,23 +66,16 @@
 COMPAT_SYSCALL_WRAP3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev);
 COMPAT_SYSCALL_WRAP2(chmod, const char __user *, filename, umode_t, mode);
 COMPAT_SYSCALL_WRAP1(oldumount, char __user *, name);
-COMPAT_SYSCALL_WRAP1(alarm, unsigned int, seconds);
 COMPAT_SYSCALL_WRAP2(access, const char __user *, filename, int, mode);
-COMPAT_SYSCALL_WRAP1(nice, int, increment);
-COMPAT_SYSCALL_WRAP2(kill, int, pid, int, sig);
 COMPAT_SYSCALL_WRAP2(rename, const char __user *, oldname, const char __user *, newname);
 COMPAT_SYSCALL_WRAP2(mkdir, const char __user *, pathname, umode_t, mode);
 COMPAT_SYSCALL_WRAP1(rmdir, const char __user *, pathname);
-COMPAT_SYSCALL_WRAP1(dup, unsigned int, fildes);
 COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes);
 COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk);
 COMPAT_SYSCALL_WRAP2(signal, int, sig, __sighandler_t, handler);
 COMPAT_SYSCALL_WRAP1(acct, const char __user *, name);
 COMPAT_SYSCALL_WRAP2(umount, char __user *, name, int, flags);
-COMPAT_SYSCALL_WRAP2(setpgid, pid_t, pid, pid_t, pgid);
-COMPAT_SYSCALL_WRAP1(umask, int, mask);
 COMPAT_SYSCALL_WRAP1(chroot, const char __user *, filename);
-COMPAT_SYSCALL_WRAP2(dup2, unsigned int, oldfd, unsigned int, newfd);
 COMPAT_SYSCALL_WRAP3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask);
 COMPAT_SYSCALL_WRAP2(sethostname, char __user *, name, int, len);
 COMPAT_SYSCALL_WRAP2(symlink, const char __user *, old, const char __user *, new);
@@ -93,37 +84,23 @@
 COMPAT_SYSCALL_WRAP2(swapon, const char __user *, specialfile, int, swap_flags);
 COMPAT_SYSCALL_WRAP4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg);
 COMPAT_SYSCALL_WRAP2(munmap, unsigned long, addr, size_t, len);
-COMPAT_SYSCALL_WRAP2(fchmod, unsigned int, fd, umode_t, mode);
-COMPAT_SYSCALL_WRAP2(getpriority, int, which, int, who);
-COMPAT_SYSCALL_WRAP3(setpriority, int, which, int, who, int, niceval);
 COMPAT_SYSCALL_WRAP3(syslog, int, type, char __user *, buf, int, len);
 COMPAT_SYSCALL_WRAP1(swapoff, const char __user *, specialfile);
-COMPAT_SYSCALL_WRAP1(fsync, unsigned int, fd);
 COMPAT_SYSCALL_WRAP2(setdomainname, char __user *, name, int, len);
 COMPAT_SYSCALL_WRAP1(newuname, struct new_utsname __user *, name);
 COMPAT_SYSCALL_WRAP3(mprotect, unsigned long, start, size_t, len, unsigned long, prot);
 COMPAT_SYSCALL_WRAP3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs);
 COMPAT_SYSCALL_WRAP2(delete_module, const char __user *, name_user, unsigned int, flags);
 COMPAT_SYSCALL_WRAP4(quotactl, unsigned int, cmd, const char __user *, special, qid_t, id, void __user *, addr);
-COMPAT_SYSCALL_WRAP1(getpgid, pid_t, pid);
-COMPAT_SYSCALL_WRAP1(fchdir, unsigned int, fd);
 COMPAT_SYSCALL_WRAP2(bdflush, int, func, long, data);
 COMPAT_SYSCALL_WRAP3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2);
-COMPAT_SYSCALL_WRAP1(s390_personality, unsigned int, personality);
 COMPAT_SYSCALL_WRAP5(llseek, unsigned int, fd, unsigned long, high, unsigned long, low, loff_t __user *, result, unsigned int, whence);
-COMPAT_SYSCALL_WRAP2(flock, unsigned int, fd, unsigned int, cmd);
 COMPAT_SYSCALL_WRAP3(msync, unsigned long, start, size_t, len, int, flags);
-COMPAT_SYSCALL_WRAP1(getsid, pid_t, pid);
-COMPAT_SYSCALL_WRAP1(fdatasync, unsigned int, fd);
 COMPAT_SYSCALL_WRAP2(mlock, unsigned long, start, size_t, len);
 COMPAT_SYSCALL_WRAP2(munlock, unsigned long, start, size_t, len);
-COMPAT_SYSCALL_WRAP1(mlockall, int, flags);
 COMPAT_SYSCALL_WRAP2(sched_setparam, pid_t, pid, struct sched_param __user *, param);
 COMPAT_SYSCALL_WRAP2(sched_getparam, pid_t, pid, struct sched_param __user *, param);
 COMPAT_SYSCALL_WRAP3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param);
-COMPAT_SYSCALL_WRAP1(sched_getscheduler, pid_t, pid);
-COMPAT_SYSCALL_WRAP1(sched_get_priority_max, int, policy);
-COMPAT_SYSCALL_WRAP1(sched_get_priority_min, int, policy);
 COMPAT_SYSCALL_WRAP5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr);
 COMPAT_SYSCALL_WRAP3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout);
 COMPAT_SYSCALL_WRAP5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5);
@@ -131,20 +108,11 @@
 COMPAT_SYSCALL_WRAP2(capget, cap_user_header_t, header, cap_user_data_t, dataptr);
 COMPAT_SYSCALL_WRAP2(capset, cap_user_header_t, header, const cap_user_data_t, data);
 COMPAT_SYSCALL_WRAP3(lchown, const char __user *, filename, uid_t, user, gid_t, group);
-COMPAT_SYSCALL_WRAP2(setreuid, uid_t, ruid, uid_t, euid);
-COMPAT_SYSCALL_WRAP2(setregid, gid_t, rgid, gid_t, egid);
 COMPAT_SYSCALL_WRAP2(getgroups, int, gidsetsize, gid_t __user *, grouplist);
 COMPAT_SYSCALL_WRAP2(setgroups, int, gidsetsize, gid_t __user *, grouplist);
-COMPAT_SYSCALL_WRAP3(fchown, unsigned int, fd, uid_t, user, gid_t, group);
-COMPAT_SYSCALL_WRAP3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid);
 COMPAT_SYSCALL_WRAP3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid);
-COMPAT_SYSCALL_WRAP3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid);
 COMPAT_SYSCALL_WRAP3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid);
 COMPAT_SYSCALL_WRAP3(chown, const char __user *, filename, uid_t, user, gid_t, group);
-COMPAT_SYSCALL_WRAP1(setuid, uid_t, uid);
-COMPAT_SYSCALL_WRAP1(setgid, gid_t, gid);
-COMPAT_SYSCALL_WRAP1(setfsuid, uid_t, uid);
-COMPAT_SYSCALL_WRAP1(setfsgid, gid_t, gid);
 COMPAT_SYSCALL_WRAP2(pivot_root, const char __user *, new_root, const char __user *, put_old);
 COMPAT_SYSCALL_WRAP3(mincore, unsigned long, start, size_t, len, unsigned char __user *, vec);
 COMPAT_SYSCALL_WRAP3(madvise, unsigned long, start, size_t, len, int, behavior);
@@ -161,23 +129,16 @@
 COMPAT_SYSCALL_WRAP2(removexattr, const char __user *, path, const char __user *, name);
 COMPAT_SYSCALL_WRAP2(lremovexattr, const char __user *, path, const char __user *, name);
 COMPAT_SYSCALL_WRAP2(fremovexattr, int, fd, const char __user *, name);
-COMPAT_SYSCALL_WRAP1(exit_group, int, error_code);
 COMPAT_SYSCALL_WRAP1(set_tid_address, int __user *, tidptr);
-COMPAT_SYSCALL_WRAP1(epoll_create, int, size);
 COMPAT_SYSCALL_WRAP4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event);
 COMPAT_SYSCALL_WRAP4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout);
-COMPAT_SYSCALL_WRAP1(timer_getoverrun, timer_t, timer_id);
-COMPAT_SYSCALL_WRAP1(timer_delete, compat_timer_t, compat_timer_id);
 COMPAT_SYSCALL_WRAP1(io_destroy, aio_context_t, ctx);
 COMPAT_SYSCALL_WRAP3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result);
 COMPAT_SYSCALL_WRAP1(mq_unlink, const char __user *, name);
 COMPAT_SYSCALL_WRAP5(add_key, const char __user *, tp, const char __user *, dsc, const void __user *, pld, size_t, len, key_serial_t, id);
 COMPAT_SYSCALL_WRAP4(request_key, const char __user *, tp, const char __user *, dsc, const char __user *, info, key_serial_t, id);
 COMPAT_SYSCALL_WRAP5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags);
-COMPAT_SYSCALL_WRAP3(ioprio_set, int, which, int, who, int, ioprio);
-COMPAT_SYSCALL_WRAP2(ioprio_get, int, which, int, who);
 COMPAT_SYSCALL_WRAP3(inotify_add_watch, int, fd, const char __user *, path, u32, mask);
-COMPAT_SYSCALL_WRAP2(inotify_rm_watch, int, fd, __s32, wd);
 COMPAT_SYSCALL_WRAP3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode);
 COMPAT_SYSCALL_WRAP4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, unsigned, dev);
 COMPAT_SYSCALL_WRAP5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag);
@@ -192,23 +153,11 @@
 COMPAT_SYSCALL_WRAP6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
 COMPAT_SYSCALL_WRAP4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags);
 COMPAT_SYSCALL_WRAP3(getcpu, unsigned __user *, cpu, unsigned __user *, node, struct getcpu_cache __user *, cache);
-COMPAT_SYSCALL_WRAP1(eventfd, unsigned int, count);
-COMPAT_SYSCALL_WRAP2(timerfd_create, int, clockid, int, flags);
-COMPAT_SYSCALL_WRAP2(eventfd2, unsigned int, count, int, flags);
-COMPAT_SYSCALL_WRAP1(inotify_init1, int, flags);
 COMPAT_SYSCALL_WRAP2(pipe2, int __user *, fildes, int, flags);
-COMPAT_SYSCALL_WRAP3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags);
-COMPAT_SYSCALL_WRAP1(epoll_create1, int, flags);
-COMPAT_SYSCALL_WRAP2(tkill, int, pid, int, sig);
-COMPAT_SYSCALL_WRAP3(tgkill, int, tgid, int, pid, int, sig);
 COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags);
 COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls);
-COMPAT_SYSCALL_WRAP2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags);
 COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim);
 COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag);
-COMPAT_SYSCALL_WRAP1(syncfs, int, fd);
-COMPAT_SYSCALL_WRAP2(setns, int, fd, int, nstype);
-COMPAT_SYSCALL_WRAP2(s390_runtime_instr, int, command, int, signum);
 COMPAT_SYSCALL_WRAP5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, idx1, unsigned long, idx2);
 COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, flags);
 COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags);
@@ -220,3 +169,10 @@
 COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size);
 COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length);
 COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length);
+COMPAT_SYSCALL_WRAP4(socketpair, int, family, int, type, int, protocol, int __user *, usockvec);
+COMPAT_SYSCALL_WRAP3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen);
+COMPAT_SYSCALL_WRAP3(connect, int, fd, struct sockaddr __user *, uservaddr, int, addrlen);
+COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen, int, flags);
+COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
+COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
+COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 247b7aa..09b039d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1191,6 +1191,7 @@
 	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
 	jhe	1f
 	lg	%r2,__LC_CURRENT
+	aghi	%r2,__TASK_thread
 0:	# Store floating-point controls
 	stfpc	__THREAD_FPU_fpc(%r2)
 1:	# Load register save area and check if VX is active
@@ -1252,6 +1253,7 @@
 	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
 	jhe	6f
 	lg	%r4,__LC_CURRENT
+	aghi	%r4,__TASK_thread
 	lfpc	__THREAD_FPU_fpc(%r4)
 	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
 	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 56fdad4..a956340 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -157,10 +157,14 @@
 
 	cpuhw = &get_cpu_var(cpu_hw_events);
 
-	/* check authorization for cpu counter sets */
+	/* Check authorization for cpu counter sets.
+	 * If the particular CPU counter set is not authorized,
+	 * return with -ENOENT in order to fall back to other
+	 * PMUs that might satisfy the event request.
+	 */
 	ctrs_state = cpumf_state_ctl[hwc->config_base];
 	if (!(ctrs_state & cpuhw->info.auth_ctl))
-		err = -EPERM;
+		err = -ENOENT;
 
 	put_cpu_var(cpu_hw_events);
 	return err;
@@ -536,7 +540,7 @@
 	 */
 	if (!(cpuhw->flags & PERF_EVENT_TXN))
 		if (validate_ctr_auth(&event->hw))
-			return -EPERM;
+			return -ENOENT;
 
 	ctr_set_enable(&cpuhw->state, event->hw.config_base);
 	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
@@ -611,7 +615,7 @@
 	state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
 	state >>= CPUMF_LCCTL_ENABLE_SHIFT;
 	if ((state & cpuhw->info.auth_ctl) != state)
-		return -EPERM;
+		return -ENOENT;
 
 	cpuhw->flags &= ~PERF_EVENT_TXN;
 	perf_pmu_enable(pmu);
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index ca62946..2d6b6e8 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -30,6 +30,9 @@
 	aghi	%r15,-STACK_FRAME_OVERHEAD
 	stg	%r1,__SF_BACKCHAIN(%r15)
 
+	/* Store FPU registers */
+	brasl	%r14,save_fpu_regs
+
 	/* Deactivate DAT */
 	stnsm	__SF_EMPTY(%r15),0xfb
 
@@ -47,23 +50,6 @@
 
 	/* Store registers */
 	mvc	0x318(4,%r1),__SF_EMPTY(%r15)	/* move prefix to lowcore */
-	stfpc	0x31c(%r1)			/* store fpu control */
-	std	0,0x200(%r1)			/* store f0 */
-	std	1,0x208(%r1)			/* store f1 */
-	std	2,0x210(%r1)			/* store f2 */
-	std	3,0x218(%r1)			/* store f3 */
-	std	4,0x220(%r1)			/* store f4 */
-	std	5,0x228(%r1)			/* store f5 */
-	std	6,0x230(%r1)			/* store f6 */
-	std	7,0x238(%r1)			/* store f7 */
-	std	8,0x240(%r1)			/* store f8 */
-	std	9,0x248(%r1)			/* store f9 */
-	std	10,0x250(%r1)			/* store f10 */
-	std	11,0x258(%r1)			/* store f11 */
-	std	12,0x260(%r1)			/* store f12 */
-	std	13,0x268(%r1)			/* store f13 */
-	std	14,0x270(%r1)			/* store f14 */
-	std	15,0x278(%r1)			/* store f15 */
 	stam	%a0,%a15,0x340(%r1)		/* store access registers */
 	stctg	%c0,%c15,0x380(%r1)		/* store control registers */
 	stmg	%r0,%r15,0x280(%r1)		/* store general registers */
@@ -249,24 +235,6 @@
 	lctlg	%c0,%c15,0x380(%r13)	/* load control registers */
 	lam	%a0,%a15,0x340(%r13)	/* load access registers */
 
-	lfpc	0x31c(%r13)		/* load fpu control */
-	ld	0,0x200(%r13)		/* load f0 */
-	ld	1,0x208(%r13)		/* load f1 */
-	ld	2,0x210(%r13)		/* load f2 */
-	ld	3,0x218(%r13)		/* load f3 */
-	ld	4,0x220(%r13)		/* load f4 */
-	ld	5,0x228(%r13)		/* load f5 */
-	ld	6,0x230(%r13)		/* load f6 */
-	ld	7,0x238(%r13)		/* load f7 */
-	ld	8,0x240(%r13)		/* load f8 */
-	ld	9,0x248(%r13)		/* load f9 */
-	ld	10,0x250(%r13)		/* load f10 */
-	ld	11,0x258(%r13)		/* load f11 */
-	ld	12,0x260(%r13)		/* load f12 */
-	ld	13,0x268(%r13)		/* load f13 */
-	ld	14,0x270(%r13)		/* load f14 */
-	ld	15,0x278(%r13)		/* load f15 */
-
 	/* Load old stack */
 	lg	%r15,0x2f8(%r13)
 
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index f3f4a13..8c56929 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -9,12 +9,12 @@
 #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall)
 
 NI_SYSCALL						/* 0 */
-SYSCALL(sys_exit,compat_sys_exit)
+SYSCALL(sys_exit,sys_exit)
 SYSCALL(sys_fork,sys_fork)
 SYSCALL(sys_read,compat_sys_s390_read)
 SYSCALL(sys_write,compat_sys_s390_write)
 SYSCALL(sys_open,compat_sys_open)			/* 5 */
-SYSCALL(sys_close,compat_sys_close)
+SYSCALL(sys_close,sys_close)
 SYSCALL(sys_restart_syscall,sys_restart_syscall)
 SYSCALL(sys_creat,compat_sys_creat)
 SYSCALL(sys_link,compat_sys_link)
@@ -35,21 +35,21 @@
 SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16)	/* old getuid16 syscall*/
 SYSCALL(sys_ni_syscall,compat_sys_stime)		/* 25 old stime syscall */
 SYSCALL(sys_ptrace,compat_sys_ptrace)
-SYSCALL(sys_alarm,compat_sys_alarm)
+SYSCALL(sys_alarm,sys_alarm)
 NI_SYSCALL						/* old fstat syscall */
 SYSCALL(sys_pause,sys_pause)
 SYSCALL(sys_utime,compat_sys_utime)			/* 30 */
 NI_SYSCALL						/* old stty syscall */
 NI_SYSCALL						/* old gtty syscall */
 SYSCALL(sys_access,compat_sys_access)
-SYSCALL(sys_nice,compat_sys_nice)
+SYSCALL(sys_nice,sys_nice)
 NI_SYSCALL						/* 35 old ftime syscall */
 SYSCALL(sys_sync,sys_sync)
-SYSCALL(sys_kill,compat_sys_kill)
+SYSCALL(sys_kill,sys_kill)
 SYSCALL(sys_rename,compat_sys_rename)
 SYSCALL(sys_mkdir,compat_sys_mkdir)
 SYSCALL(sys_rmdir,compat_sys_rmdir)			/* 40 */
-SYSCALL(sys_dup,compat_sys_dup)
+SYSCALL(sys_dup,sys_dup)
 SYSCALL(sys_pipe,compat_sys_pipe)
 SYSCALL(sys_times,compat_sys_times)
 NI_SYSCALL						/* old prof syscall */
@@ -65,13 +65,13 @@
 SYSCALL(sys_ioctl,compat_sys_ioctl)
 SYSCALL(sys_fcntl,compat_sys_fcntl)			/* 55 */
 NI_SYSCALL						/* intel mpx syscall */
-SYSCALL(sys_setpgid,compat_sys_setpgid)
+SYSCALL(sys_setpgid,sys_setpgid)
 NI_SYSCALL						/* old ulimit syscall */
 NI_SYSCALL						/* old uname syscall */
-SYSCALL(sys_umask,compat_sys_umask)			/* 60 */
+SYSCALL(sys_umask,sys_umask)				/* 60 */
 SYSCALL(sys_chroot,compat_sys_chroot)
 SYSCALL(sys_ustat,compat_sys_ustat)
-SYSCALL(sys_dup2,compat_sys_dup2)
+SYSCALL(sys_dup2,sys_dup2)
 SYSCALL(sys_getppid,sys_getppid)
 SYSCALL(sys_getpgrp,sys_getpgrp)			/* 65 */
 SYSCALL(sys_setsid,sys_setsid)
@@ -102,10 +102,10 @@
 SYSCALL(sys_munmap,compat_sys_munmap)
 SYSCALL(sys_truncate,compat_sys_truncate)
 SYSCALL(sys_ftruncate,compat_sys_ftruncate)
-SYSCALL(sys_fchmod,compat_sys_fchmod)
+SYSCALL(sys_fchmod,sys_fchmod)
 SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16)	/* 95 old fchown16 syscall*/
-SYSCALL(sys_getpriority,compat_sys_getpriority)
-SYSCALL(sys_setpriority,compat_sys_setpriority)
+SYSCALL(sys_getpriority,sys_getpriority)
+SYSCALL(sys_setpriority,sys_setpriority)
 NI_SYSCALL						/* old profil syscall */
 SYSCALL(sys_statfs,compat_sys_statfs)
 SYSCALL(sys_fstatfs,compat_sys_fstatfs)			/* 100 */
@@ -126,7 +126,7 @@
 SYSCALL(sys_swapoff,compat_sys_swapoff)			/* 115 */
 SYSCALL(sys_sysinfo,compat_sys_sysinfo)
 SYSCALL(sys_s390_ipc,compat_sys_s390_ipc)
-SYSCALL(sys_fsync,compat_sys_fsync)
+SYSCALL(sys_fsync,sys_fsync)
 SYSCALL(sys_sigreturn,compat_sys_sigreturn)
 SYSCALL(sys_clone,compat_sys_clone)			/* 120 */
 SYSCALL(sys_setdomainname,compat_sys_setdomainname)
@@ -140,35 +140,35 @@
 SYSCALL(sys_delete_module,compat_sys_delete_module)
 NI_SYSCALL						/* 130: old get_kernel_syms */
 SYSCALL(sys_quotactl,compat_sys_quotactl)
-SYSCALL(sys_getpgid,compat_sys_getpgid)
-SYSCALL(sys_fchdir,compat_sys_fchdir)
+SYSCALL(sys_getpgid,sys_getpgid)
+SYSCALL(sys_fchdir,sys_fchdir)
 SYSCALL(sys_bdflush,compat_sys_bdflush)
 SYSCALL(sys_sysfs,compat_sys_sysfs)			/* 135 */
-SYSCALL(sys_s390_personality,compat_sys_s390_personality)
+SYSCALL(sys_s390_personality,sys_s390_personality)
 NI_SYSCALL						/* for afs_syscall */
 SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16)	/* old setfsuid16 syscall */
 SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16)	/* old setfsgid16 syscall */
 SYSCALL(sys_llseek,compat_sys_llseek)			/* 140 */
 SYSCALL(sys_getdents,compat_sys_getdents)
 SYSCALL(sys_select,compat_sys_select)
-SYSCALL(sys_flock,compat_sys_flock)
+SYSCALL(sys_flock,sys_flock)
 SYSCALL(sys_msync,compat_sys_msync)
 SYSCALL(sys_readv,compat_sys_readv)			/* 145 */
 SYSCALL(sys_writev,compat_sys_writev)
-SYSCALL(sys_getsid,compat_sys_getsid)
-SYSCALL(sys_fdatasync,compat_sys_fdatasync)
+SYSCALL(sys_getsid,sys_getsid)
+SYSCALL(sys_fdatasync,sys_fdatasync)
 SYSCALL(sys_sysctl,compat_sys_sysctl)
 SYSCALL(sys_mlock,compat_sys_mlock)			/* 150 */
 SYSCALL(sys_munlock,compat_sys_munlock)
-SYSCALL(sys_mlockall,compat_sys_mlockall)
+SYSCALL(sys_mlockall,sys_mlockall)
 SYSCALL(sys_munlockall,sys_munlockall)
 SYSCALL(sys_sched_setparam,compat_sys_sched_setparam)
 SYSCALL(sys_sched_getparam,compat_sys_sched_getparam)	/* 155 */
 SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler)
-SYSCALL(sys_sched_getscheduler,compat_sys_sched_getscheduler)
+SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler)
 SYSCALL(sys_sched_yield,sys_sched_yield)
-SYSCALL(sys_sched_get_priority_max,compat_sys_sched_get_priority_max)
-SYSCALL(sys_sched_get_priority_min,compat_sys_sched_get_priority_min)	/* 160 */
+SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max)
+SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min)	/* 160 */
 SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
 SYSCALL(sys_nanosleep,compat_sys_nanosleep)
 SYSCALL(sys_mremap,compat_sys_mremap)
@@ -211,20 +211,20 @@
 SYSCALL(sys_getgid,sys_getgid)				/* 200 */
 SYSCALL(sys_geteuid,sys_geteuid)
 SYSCALL(sys_getegid,sys_getegid)
-SYSCALL(sys_setreuid,compat_sys_setreuid)
-SYSCALL(sys_setregid,compat_sys_setregid)
+SYSCALL(sys_setreuid,sys_setreuid)
+SYSCALL(sys_setregid,sys_setregid)
 SYSCALL(sys_getgroups,compat_sys_getgroups)		/* 205 */
 SYSCALL(sys_setgroups,compat_sys_setgroups)
-SYSCALL(sys_fchown,compat_sys_fchown)
-SYSCALL(sys_setresuid,compat_sys_setresuid)
+SYSCALL(sys_fchown,sys_fchown)
+SYSCALL(sys_setresuid,sys_setresuid)
 SYSCALL(sys_getresuid,compat_sys_getresuid)
-SYSCALL(sys_setresgid,compat_sys_setresgid)		/* 210 */
+SYSCALL(sys_setresgid,sys_setresgid)			/* 210 */
 SYSCALL(sys_getresgid,compat_sys_getresgid)
 SYSCALL(sys_chown,compat_sys_chown)
-SYSCALL(sys_setuid,compat_sys_setuid)
-SYSCALL(sys_setgid,compat_sys_setgid)
-SYSCALL(sys_setfsuid,compat_sys_setfsuid)		/* 215 */
-SYSCALL(sys_setfsgid,compat_sys_setfsgid)
+SYSCALL(sys_setuid,sys_setuid)
+SYSCALL(sys_setgid,sys_setgid)
+SYSCALL(sys_setfsuid,sys_setfsuid)			/* 215 */
+SYSCALL(sys_setfsgid,sys_setfsgid)
 SYSCALL(sys_pivot_root,compat_sys_pivot_root)
 SYSCALL(sys_mincore,compat_sys_mincore)
 SYSCALL(sys_madvise,compat_sys_madvise)
@@ -245,19 +245,19 @@
 SYSCALL(sys_lremovexattr,compat_sys_lremovexattr)
 SYSCALL(sys_fremovexattr,compat_sys_fremovexattr)	/* 235 */
 SYSCALL(sys_gettid,sys_gettid)
-SYSCALL(sys_tkill,compat_sys_tkill)
+SYSCALL(sys_tkill,sys_tkill)
 SYSCALL(sys_futex,compat_sys_futex)
 SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity)
 SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity)	/* 240 */
-SYSCALL(sys_tgkill,compat_sys_tgkill)
+SYSCALL(sys_tgkill,sys_tgkill)
 NI_SYSCALL						/* reserved for TUX */
 SYSCALL(sys_io_setup,compat_sys_io_setup)
 SYSCALL(sys_io_destroy,compat_sys_io_destroy)
 SYSCALL(sys_io_getevents,compat_sys_io_getevents)	/* 245 */
 SYSCALL(sys_io_submit,compat_sys_io_submit)
 SYSCALL(sys_io_cancel,compat_sys_io_cancel)
-SYSCALL(sys_exit_group,compat_sys_exit_group)
-SYSCALL(sys_epoll_create,compat_sys_epoll_create)
+SYSCALL(sys_exit_group,sys_exit_group)
+SYSCALL(sys_epoll_create,sys_epoll_create)
 SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl)		/* 250 */
 SYSCALL(sys_epoll_wait,compat_sys_epoll_wait)
 SYSCALL(sys_set_tid_address,compat_sys_set_tid_address)
@@ -265,8 +265,8 @@
 SYSCALL(sys_timer_create,compat_sys_timer_create)
 SYSCALL(sys_timer_settime,compat_sys_timer_settime)	/* 255 */
 SYSCALL(sys_timer_gettime,compat_sys_timer_gettime)
-SYSCALL(sys_timer_getoverrun,compat_sys_timer_getoverrun)
-SYSCALL(sys_timer_delete,compat_sys_timer_delete)
+SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun)
+SYSCALL(sys_timer_delete,sys_timer_delete)
 SYSCALL(sys_clock_settime,compat_sys_clock_settime)
 SYSCALL(sys_clock_gettime,compat_sys_clock_gettime)	/* 260 */
 SYSCALL(sys_clock_getres,compat_sys_clock_getres)
@@ -290,11 +290,11 @@
 SYSCALL(sys_request_key,compat_sys_request_key)
 SYSCALL(sys_keyctl,compat_sys_keyctl)			/* 280 */
 SYSCALL(sys_waitid,compat_sys_waitid)
-SYSCALL(sys_ioprio_set,compat_sys_ioprio_set)
-SYSCALL(sys_ioprio_get,compat_sys_ioprio_get)
+SYSCALL(sys_ioprio_set,sys_ioprio_set)
+SYSCALL(sys_ioprio_get,sys_ioprio_get)
 SYSCALL(sys_inotify_init,sys_inotify_init)
 SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch)	/* 285 */
-SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch)
+SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch)
 SYSCALL(sys_migrate_pages,compat_sys_migrate_pages)
 SYSCALL(sys_openat,compat_sys_openat)
 SYSCALL(sys_mkdirat,compat_sys_mkdirat)
@@ -326,31 +326,31 @@
 SYSCALL(sys_utimensat,compat_sys_utimensat)		/* 315 */
 SYSCALL(sys_signalfd,compat_sys_signalfd)
 NI_SYSCALL						/* 317 old sys_timer_fd */
-SYSCALL(sys_eventfd,compat_sys_eventfd)
-SYSCALL(sys_timerfd_create,compat_sys_timerfd_create)
+SYSCALL(sys_eventfd,sys_eventfd)
+SYSCALL(sys_timerfd_create,sys_timerfd_create)
 SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
 SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime)
 SYSCALL(sys_signalfd4,compat_sys_signalfd4)
-SYSCALL(sys_eventfd2,compat_sys_eventfd2)
-SYSCALL(sys_inotify_init1,compat_sys_inotify_init1)
+SYSCALL(sys_eventfd2,sys_eventfd2)
+SYSCALL(sys_inotify_init1,sys_inotify_init1)
 SYSCALL(sys_pipe2,compat_sys_pipe2)			/* 325 */
-SYSCALL(sys_dup3,compat_sys_dup3)
-SYSCALL(sys_epoll_create1,compat_sys_epoll_create1)
+SYSCALL(sys_dup3,sys_dup3)
+SYSCALL(sys_epoll_create1,sys_epoll_create1)
 SYSCALL(sys_preadv,compat_sys_preadv)
 SYSCALL(sys_pwritev,compat_sys_pwritev)
 SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
 SYSCALL(sys_perf_event_open,compat_sys_perf_event_open)
-SYSCALL(sys_fanotify_init,compat_sys_fanotify_init)
+SYSCALL(sys_fanotify_init,sys_fanotify_init)
 SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark)
 SYSCALL(sys_prlimit64,compat_sys_prlimit64)
 SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */
 SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at)
 SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime)
-SYSCALL(sys_syncfs,compat_sys_syncfs)
-SYSCALL(sys_setns,compat_sys_setns)
+SYSCALL(sys_syncfs,sys_syncfs)
+SYSCALL(sys_setns,sys_setns)
 SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */
 SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev)
-SYSCALL(sys_s390_runtime_instr,compat_sys_s390_runtime_instr)
+SYSCALL(sys_s390_runtime_instr,sys_s390_runtime_instr)
 SYSCALL(sys_kcmp,compat_sys_kcmp)
 SYSCALL(sys_finit_module,compat_sys_finit_module)
 SYSCALL(sys_sched_setattr,compat_sys_sched_setattr)	/* 345 */
@@ -363,3 +363,22 @@
 SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
 SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
 SYSCALL(sys_execveat,compat_sys_execveat)
+SYSCALL(sys_userfaultfd,sys_userfaultfd)		/* 355 */
+SYSCALL(sys_membarrier,sys_membarrier)
+SYSCALL(sys_recvmmsg,compat_sys_recvmmsg)
+SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
+SYSCALL(sys_socket,sys_socket)
+SYSCALL(sys_socketpair,compat_sys_socketpair)		/* 360 */
+SYSCALL(sys_bind,sys_bind)
+SYSCALL(sys_connect,sys_connect)
+SYSCALL(sys_listen,sys_listen)
+SYSCALL(sys_accept4,sys_accept4)
+SYSCALL(sys_getsockopt,compat_sys_getsockopt)		/* 365 */
+SYSCALL(sys_setsockopt,compat_sys_setsockopt)
+SYSCALL(sys_getsockname,compat_sys_getsockname)
+SYSCALL(sys_getpeername,compat_sys_getpeername)
+SYSCALL(sys_sendto,compat_sys_sendto)
+SYSCALL(sys_sendmsg,compat_sys_sendmsg)			/* 370 */
+SYSCALL(sys_recvfrom,compat_sys_recvfrom)
+SYSCALL(sys_recvmsg,compat_sys_recvmsg)
+SYSCALL(sys_shutdown,sys_shutdown)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index b9ce650..c865343 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -89,17 +89,21 @@
 	if (smp_cpu_mtid &&
 	    time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
 		u64 cycles_new[32], *cycles_old;
-		u64 delta, mult, div;
+		u64 delta, fac, mult, div;
 
 		cycles_old = this_cpu_ptr(mt_cycles);
 		if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
+			fac = 1;
 			mult = div = 0;
 			for (i = 0; i <= smp_cpu_mtid; i++) {
 				delta = cycles_new[i] - cycles_old[i];
-				mult += delta;
-				div += (i + 1) * delta;
+				div += delta;
+				mult *= i + 1;
+				mult += delta * fac;
+				fac *= i + 1;
 			}
-			if (mult > 0) {
+			div *= fac;
+			if (div > 0) {
 				/* Update scaling factor */
 				__this_cpu_write(mt_scaling_mult, mult);
 				__this_cpu_write(mt_scaling_div, div);
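Illustrative aside, not part of the patch: tracing the new loop with Delta_i = cycles_new[i] - cycles_old[i] and n = smp_cpu_mtid + 1 (the common factor n!, accumulated in fac, cancels between mult and div) appears to give

\[
\frac{\mathrm{mult}}{\mathrm{div}}
   = \frac{\sum_{i=0}^{n-1} \Delta_i/(i+1)}{\sum_{i=0}^{n-1} \Delta_i}
\qquad\text{previously}\qquad
   \frac{\sum_{i=0}^{n-1} \Delta_i}{\sum_{i=0}^{n-1} (i+1)\,\Delta_i}
\]

i.e. the MT scaling factor becomes the cycle-weighted average of 1/(i+1) rather than the reciprocal of the cycle-weighted average of (i+1).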
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c91eb94..0a67c40 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -63,6 +63,7 @@
 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
+	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
@@ -1574,7 +1575,7 @@
 
 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 {
-	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 }
 
 /*
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index eeda051..9a0c4c2 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1310,7 +1310,7 @@
 	if (jit.prg_buf) {
 		set_memory_ro((unsigned long)header, header->pages);
 		fp->bpf_func = (void *) jit.prg_buf;
-		fp->jited = true;
+		fp->jited = 1;
 	}
 free_addrs:
 	kfree(jit.addrs);
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c
index 6f97a8f..6129aef 100644
--- a/arch/sh/boards/mach-se/7343/irq.c
+++ b/arch/sh/boards/mach-se/7343/irq.c
@@ -29,7 +29,7 @@
 static void __iomem *se7343_irq_regs;
 struct irq_domain *se7343_irq_domain;
 
-static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void se7343_irq_demux(struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c
index 60aebd1..24c74a8 100644
--- a/arch/sh/boards/mach-se/7722/irq.c
+++ b/arch/sh/boards/mach-se/7722/irq.c
@@ -28,7 +28,7 @@
 static void __iomem *se7722_irq_regs;
 struct irq_domain *se7722_irq_domain;
 
-static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void se7722_irq_demux(struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
diff --git a/arch/sh/boards/mach-se/7724/irq.c b/arch/sh/boards/mach-se/7724/irq.c
index 9f20338..64e681e 100644
--- a/arch/sh/boards/mach-se/7724/irq.c
+++ b/arch/sh/boards/mach-se/7724/irq.c
@@ -92,7 +92,7 @@
 	.irq_unmask	= enable_se7724_irq,
 };
 
-static void se7724_irq_demux(unsigned int __irq, struct irq_desc *desc)
+static void se7724_irq_demux(struct irq_desc *desc)
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 	struct fpga_irq set = get_fpga_irq(irq);
diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c
index 24555c3..1fb2cbe 100644
--- a/arch/sh/boards/mach-x3proto/gpio.c
+++ b/arch/sh/boards/mach-x3proto/gpio.c
@@ -60,7 +60,7 @@
 	return virq;
 }
 
-static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void x3proto_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
diff --git a/arch/sh/cchips/hd6446x/hd64461.c b/arch/sh/cchips/hd6446x/hd64461.c
index e973561..8180092 100644
--- a/arch/sh/cchips/hd6446x/hd64461.c
+++ b/arch/sh/cchips/hd6446x/hd64461.c
@@ -56,7 +56,7 @@
 	.irq_unmask	= hd64461_unmask_irq,
 };
 
-static void hd64461_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void hd64461_irq_demux(struct irq_desc *desc)
 {
 	unsigned short intv = __raw_readw(HD64461_NIRR);
 	unsigned int ext_irq = HD64461_IRQBASE;
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 0299f05..42efcf8 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -53,7 +53,7 @@
 }
 
 /* Handle one or multiple IRQs from the extended interrupt controller */
-static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
+static void leon_handle_ext_irq(struct irq_desc *desc)
 {
 	unsigned int eirq;
 	struct irq_bucket *p;
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
index 3382f7b..1e77128 100644
--- a/arch/sparc/kernel/leon_pci_grpci1.c
+++ b/arch/sparc/kernel/leon_pci_grpci1.c
@@ -357,7 +357,7 @@
 };
 
 /* Handle one or multiple IRQs from the PCI core */
-static void grpci1_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
+static void grpci1_pci_flow_irq(struct irq_desc *desc)
 {
 	struct grpci1_priv *priv = grpci1priv;
 	int i, ack = 0;
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index 814fb17..f727c4d 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -498,7 +498,7 @@
 };
 
 /* Handle one or multiple IRQs from the PCI core */
-static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
+static void grpci2_pci_flow_irq(struct irq_desc *desc)
 {
 	struct grpci2_priv *priv = grpci2priv;
 	int i, ack = 0;
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index f8b9f71..22564f5 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -812,7 +812,7 @@
 	if (image) {
 		bpf_flush_icache(image, image + proglen);
 		fp->bpf_func = (void *)image;
-		fp->jited = true;
+		fp->jited = 1;
 	}
 out:
 	kfree(addrs);
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index b3f73fd..4c017d0 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -304,17 +304,16 @@
  * to Linux which just calls handle_level_irq() after clearing the
  * MAC INTx Assert status bit associated with this interrupt.
  */
-static void trio_handle_level_irq(unsigned int __irq, struct irq_desc *desc)
+static void trio_handle_level_irq(struct irq_desc *desc)
 {
 	struct pci_controller *controller = irq_desc_get_handler_data(desc);
 	gxio_trio_context_t *trio_context = controller->trio;
 	uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
-	unsigned int irq = irq_desc_get_irq(desc);
 	int mac = controller->mac;
 	unsigned int reg_offset;
 	uint64_t level_mask;
 
-	handle_level_irq(irq, desc);
+	handle_level_irq(desc);
 
 	/*
 	 * Clear the INTx Level status, otherwise future interrupts are
diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c
index f0da5a2..9f1e05e 100644
--- a/arch/tile/kernel/usb.c
+++ b/arch/tile/kernel/usb.c
@@ -22,6 +22,7 @@
 #include <linux/platform_device.h>
 #include <linux/usb/tilegx.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/types.h>
 
 static u64 ehci_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/unicore32/kernel/irq.c b/arch/unicore32/kernel/irq.c
index c53729d..eb1fd00 100644
--- a/arch/unicore32/kernel/irq.c
+++ b/arch/unicore32/kernel/irq.c
@@ -112,7 +112,7 @@
  * irq_controller_lock held, and IRQs disabled.  Decode the IRQ
  * and call the handler.
  */
-static void puv3_gpio_handler(unsigned int __irq, struct irq_desc *desc)
+static void puv3_gpio_handler(struct irq_desc *desc)
 {
 	unsigned int mask, irq;
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7aef2d5..328c835 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1006,7 +1006,7 @@
 	depends on X86_MCE_INTEL
 
 config X86_LEGACY_VM86
-	bool "Legacy VM86 support (obsolete)"
+	bool "Legacy VM86 support"
 	default n
 	depends on X86_32
 	---help---
@@ -1018,19 +1018,20 @@
 	  available to accelerate real mode DOS programs.  However, any
 	  recent version of DOSEMU, X, or vbetool should be fully
 	  functional even without kernel VM86 support, as they will all
-	  fall back to (pretty well performing) software emulation.
+	  fall back to software emulation. Nevertheless, if you are using
+	  a 16-bit DOS program where 16-bit performance matters, vm86
+	  mode might be faster than emulation and you might want to
+	  enable this option.
 
-	  Anything that works on a 64-bit kernel is unlikely to need
-	  this option, as 64-bit kernels don't, and can't, support V8086
-	  mode.  This option is also unrelated to 16-bit protected mode
-	  and is not needed to run most 16-bit programs under Wine.
+	  Note that any app that works on a 64-bit kernel is unlikely to
+	  need this option, as 64-bit kernels don't, and can't, support
+	  V8086 mode. This option is also unrelated to 16-bit protected
+	  mode and is not needed to run most 16-bit programs under Wine.
 
-	  Enabling this option adds considerable attack surface to the
-	  kernel and slows down system calls and exception handling.
+	  Enabling this option increases the complexity of the kernel
+	  and slows down exception handling a tiny bit.
 
-	  Unless you use very old userspace or need the last drop of
-	  performance in your real mode DOS games and can't use KVM,
-	  say N here.
+	  If unsure, say N here.
 
 config VM86
        bool
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d303318..055a01d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1128,7 +1128,18 @@
 
 /* Runs on exception stack */
 ENTRY(nmi)
+	/*
+	 * Fix up the exception frame if we're on Xen.
+	 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
+	 * one value to the stack on native, so it may clobber the rdx
+	 * scratch slot, but it won't clobber any of the important
+	 * slots past it.
+	 *
+	 * Xen is a different story, because the Xen frame itself overlaps
+	 * the "NMI executing" variable.
+	 */
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
+
 	/*
 	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
 	 * the iretq it performs will take us out of NMI context.
@@ -1179,9 +1190,12 @@
 	 * we don't want to enable interrupts, because then we'll end
 	 * up in an awkward situation in which IRQs are on but NMIs
 	 * are off.
+	 *
+	 * We also must not push anything to the stack before switching
+	 * stacks lest we corrupt the "NMI executing" variable.
 	 */
 
-	SWAPGS
+	SWAPGS_UNSAFE_STACK
 	cld
 	movq	%rsp, %rdx
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 477bfa6..7663c45 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -381,3 +381,4 @@
 372	i386	recvmsg			sys_recvmsg			compat_sys_recvmsg
 373	i386	shutdown		sys_shutdown
 374	i386	userfaultfd		sys_userfaultfd
+375	i386	membarrier		sys_membarrier
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 81c4906..278842f 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -330,6 +330,7 @@
 321	common	bpf			sys_bpf
 322	64	execveat		stub_execveat
 323	common	userfaultfd		sys_userfaultfd
+324	common	membarrier		sys_membarrier
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
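Illustrative aside, not part of the patch: with the table entry above, the newly wired membarrier(2) call can be exercised from userspace roughly as below (a sketch assuming installed UAPI headers provide <linux/membarrier.h> and __NR_membarrier; otherwise the raw number is 324 on x86-64 per this table).

#include <linux/membarrier.h>	/* MEMBARRIER_CMD_QUERY, MEMBARRIER_CMD_SHARED */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* QUERY returns a bitmask of the commands this kernel supports. */
	long cmds = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);

	if (cmds < 0) {
		perror("membarrier");	/* ENOSYS on kernels without the syscall */
		return 1;
	}
	if (cmds & MEMBARRIER_CMD_SHARED)
		/* Acts as a memory barrier on every running thread in the system. */
		syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0);
	printf("supported membarrier commands: 0x%lx\n", cmds);
	return 0;
}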
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 477fc28..e6cf2ad 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -241,6 +241,7 @@
 #define X86_FEATURE_AVX512PF	( 9*32+26) /* AVX-512 Prefetch */
 #define X86_FEATURE_AVX512ER	( 9*32+27) /* AVX-512 Exponential and Reciprocal */
 #define X86_FEATURE_AVX512CD	( 9*32+28) /* AVX-512 Conflict Detection */
+#define X86_FEATURE_SHA_NI	( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
 
 /* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
 #define X86_FEATURE_XSAVEOPT	(10*32+ 0) /* XSAVEOPT */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 155162e..ae68be9 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -86,6 +86,18 @@
 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
 					u32 type, u64 attribute);
 
+#ifdef CONFIG_KASAN
+/*
+ * CONFIG_KASAN may redefine memset to __memset.  __memset() is present only
+ * in the kernel binary.  Since the EFI stub is linked into a separate binary,
+ * it doesn't have __memset(), so we should use the standard memset from
+ * arch/x86/boot/compressed/string.c.  The same applies to memcpy and memmove.
+ */
+#undef memcpy
+#undef memset
+#undef memmove
+#endif
+
 #endif /* CONFIG_X86_32 */
 
 extern struct efi_scratch efi_scratch;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c12e845..2beee03 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -40,6 +40,7 @@
 
 #define KVM_PIO_PAGE_OFFSET 1
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
+#define KVM_HALT_POLL_NS_DEFAULT 500000
 
 #define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
 
@@ -711,6 +712,7 @@
 	u32 nmi_window_exits;
 	u32 halt_exits;
 	u32 halt_successful_poll;
+	u32 halt_attempted_poll;
 	u32 halt_wakeup;
 	u32 request_irq_exits;
 	u32 irq_exits;
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index c1c0a1c..b8c14bb 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -141,6 +141,8 @@
 #define DEBUGCTLMSR_BTS_OFF_USR		(1UL << 10)
 #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI	(1UL << 11)
 
+#define MSR_PEBS_FRONTEND		0x000003f7
+
 #define MSR_IA32_POWER_CTL		0x000001fc
 
 #define MSR_IA32_MC0_CTL		0x00000400
@@ -331,6 +333,7 @@
 /* C1E active bits in int pending message */
 #define K8_INTP_C1E_ACTIVE_MASK		0x18000000
 #define MSR_K8_TSEG_ADDR		0xc0010112
+#define MSR_K8_TSEG_MASK		0xc0010113
 #define K8_MTRRFIXRANGE_DRAM_ENABLE	0x00040000 /* MtrrFixDramEn bit    */
 #define K8_MTRRFIXRANGE_DRAM_MODIFY	0x00080000 /* MtrrFixDramModEn bit */
 #define K8_MTRR_RDMEM_WRMEM_MASK	0x18181818 /* Mask: RdMem|WrMem    */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index ce029e4..31247b5 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -97,7 +97,6 @@
 struct pv_time_ops {
 	unsigned long long (*sched_clock)(void);
 	unsigned long long (*steal_clock)(int cpu);
-	unsigned long (*get_tsc_khz)(void);
 };
 
 struct pv_cpu_ops {
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h
index 655e07a..67f0823 100644
--- a/arch/x86/include/asm/pvclock-abi.h
+++ b/arch/x86/include/asm/pvclock-abi.h
@@ -41,6 +41,7 @@
 
 #define PVCLOCK_TSC_STABLE_BIT	(1 << 0)
 #define PVCLOCK_GUEST_STOPPED	(1 << 1)
+/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */
 #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2)
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 9d51fae..eaba080 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -39,18 +39,27 @@
 }
 #endif
 
-#define virt_queued_spin_lock virt_queued_spin_lock
-
-static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifdef CONFIG_PARAVIRT
+#define virt_spin_lock virt_spin_lock
+static inline bool virt_spin_lock(struct qspinlock *lock)
 {
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 		return false;
 
-	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
-		cpu_relax();
+	/*
+	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+	 * back to a Test-and-Set spinlock, because fair locks have
+	 * horrible lock 'holder' preemption issues.
+	 */
+
+	do {
+		while (atomic_read(&lock->val) != 0)
+			cpu_relax();
+	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 
 	return true;
 }
+#endif /* CONFIG_PARAVIRT */
 
 #include <asm-generic/qspinlock.h>
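Illustrative aside, not part of the patch: the new loop is the classic test-and-test-and-set pattern; waiters spin on a plain read and only attempt the atomic cmpxchg once the lock looks free, so they do not bounce the cacheline with failed read-modify-write operations. A minimal userspace sketch of the same idea (GCC atomic builtins, hypothetical ttas_lock/ttas_unlock helpers):

static inline void ttas_lock(volatile int *lock)
{
	do {
		/* Read-only spin: the cacheline stays shared among waiters. */
		while (__atomic_load_n(lock, __ATOMIC_RELAXED))
			;	/* the kernel would call cpu_relax() here */
	} while (__atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE));
}

static inline void ttas_unlock(volatile int *lock)
{
	__atomic_store_n(lock, 0, __ATOMIC_RELEASE);
}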
 
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c42827e..25f9093 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -338,10 +338,15 @@
 
 static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
 {
+	unsigned long flags;
+
 	if (instr[0] != 0x90)
 		return;
 
+	local_irq_save(flags);
 	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
+	sync_core();
+	local_irq_restore(flags);
 
 	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
 		   instr, a->instrlen - a->padlen, a->padlen);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 3ca3e46..24e94ce 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -336,6 +336,13 @@
 	apic_write(APIC_LVTT, lvtt_value);
 
 	if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
+		/*
+		 * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
+		 * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
+		 * According to Intel, MFENCE can do the serialization here.
+		 */
+		asm volatile("mfence" : : : "memory");
+
 		printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
 		return;
 	}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 38a76f8..5c60bb1 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2522,6 +2522,7 @@
 	int pin, ioapic, irq, irq_entry;
 	const struct cpumask *mask;
 	struct irq_data *idata;
+	struct irq_chip *chip;
 
 	if (skip_ioapic_setup == 1)
 		return;
@@ -2545,9 +2546,9 @@
 		else
 			mask = apic->target_cpus();
 
-		irq_set_affinity(irq, mask);
+		chip = irq_data_get_irq_chip(idata);
+		chip->irq_set_affinity(idata, mask, false);
 	}
-
 }
 #endif
 
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 1bbd0fe..836d11b 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -489,10 +489,8 @@
 
 	err = assign_irq_vector(irq, data, dest);
 	if (err) {
-		struct irq_data *top = irq_get_irq_data(irq);
-
 		if (assign_irq_vector(irq, data,
-				      irq_data_get_affinity_mask(top)))
+				      irq_data_get_affinity_mask(irq_data)))
 			pr_err("Failed to recover vector for irq %d\n", irq);
 		return err;
 	}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 07ce52c..de22ea7 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1110,10 +1110,10 @@
 	else
 		printk(KERN_CONT "%d86", c->x86);
 
-	printk(KERN_CONT " (fam: %02x, model: %02x", c->x86, c->x86_model);
+	printk(KERN_CONT " (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
 
 	if (c->x86_mask || c->cpuid_level >= 0)
-		printk(KERN_CONT ", stepping: %02x)\n", c->x86_mask);
+		printk(KERN_CONT ", stepping: 0x%x)\n", c->x86_mask);
 	else
 		printk(KERN_CONT ")\n");
 
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 5edf6d8..165be83 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -47,6 +47,7 @@
 	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
 	EXTRA_REG_LBR   = 2,	/* lbr_select */
 	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
+	EXTRA_REG_FE    = 4,    /* fe_* */
 
 	EXTRA_REG_MAX		/* number of entries needed */
 };
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index cd9b6d0..f63360b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -205,6 +205,11 @@
 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+	/*
+	 * Note that the low 8 bits of the eventsel code are not a contiguous
+	 * field; they contain some bits that #GP when set. These are masked out.
+	 */
+	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
 	EVENT_EXTRA_END
 };
 
@@ -250,7 +255,7 @@
 	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
 	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
-	INTEL_EVENT_CONSTRAINT(0xa3, 0x4),	/* CYCLE_ACTIVITY.* */
+	INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
 	EVENT_CONSTRAINT_END
 };
 
@@ -2316,9 +2321,12 @@
 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 			    struct perf_event *event)
 {
-	struct event_constraint *c1 = cpuc->event_constraint[idx];
+	struct event_constraint *c1 = NULL;
 	struct event_constraint *c2;
 
+	if (idx >= 0) /* fake does < 0 */
+		c1 = cpuc->event_constraint[idx];
+
 	/*
 	 * first time only
 	 * - static constraint: no change across incremental scheduling calls
@@ -2888,6 +2896,8 @@
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
 
+PMU_FORMAT_ATTR(frontend, "config1:0-23");
+
 static struct attribute *intel_arch3_formats_attr[] = {
 	&format_attr_event.attr,
 	&format_attr_umask.attr,
@@ -2904,6 +2914,11 @@
 	NULL,
 };
 
+static struct attribute *skl_format_attr[] = {
+	&format_attr_frontend.attr,
+	NULL,
+};
+
 static __initconst const struct x86_pmu core_pmu = {
 	.name			= "core",
 	.handle_irq		= x86_pmu_handle_irq,
@@ -3513,7 +3528,8 @@
 
 		x86_pmu.hw_config = hsw_hw_config;
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
-		x86_pmu.cpu_events = hsw_events_attrs;
+		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
+						  skl_format_attr);
 		WARN_ON(!x86_pmu.format_attrs);
 		x86_pmu.cpu_events = hsw_events_attrs;
 		pr_cont("Skylake events, ");
diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
index 54690e8..d1c0f25 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_bts.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c
@@ -222,6 +222,7 @@
 	if (!buf || bts_buffer_is_full(buf, bts))
 		return;
 
+	event->hw.itrace_started = 1;
 	event->hw.state = 0;
 
 	if (!buf->snapshot)
diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c
index 086b12e..f32ac13 100644
--- a/arch/x86/kernel/cpu/perf_event_msr.c
+++ b/arch/x86/kernel/cpu/perf_event_msr.c
@@ -10,12 +10,12 @@
 	PERF_MSR_EVENT_MAX,
 };
 
-bool test_aperfmperf(int idx)
+static bool test_aperfmperf(int idx)
 {
 	return boot_cpu_has(X86_FEATURE_APERFMPERF);
 }
 
-bool test_intel(int idx)
+static bool test_intel(int idx)
 {
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
 	    boot_cpu_data.x86 != 6)
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index c80cf66..38da8f2 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -68,11 +68,10 @@
 	return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
 }
 
-static inline int
-execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
 {
 	struct irq_stack *curstk, *irqstk;
-	u32 *isp, *prev_esp, arg1, arg2;
+	u32 *isp, *prev_esp, arg1;
 
 	curstk = (struct irq_stack *) current_stack();
 	irqstk = __this_cpu_read(hardirq_stack);
@@ -98,8 +97,8 @@
 	asm volatile("xchgl	%%ebx,%%esp	\n"
 		     "call	*%%edi		\n"
 		     "movl	%%ebx,%%esp	\n"
-		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
-		     :  "0" (irq),   "1" (desc),  "2" (isp),
+		     : "=a" (arg1), "=b" (isp)
+		     :  "0" (desc),   "1" (isp),
 			"D" (desc->handle_irq)
 		     : "memory", "cc", "ecx");
 	return 1;
@@ -150,19 +149,15 @@
 
 bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
-	unsigned int irq;
-	int overflow;
-
-	overflow = check_stack_overflow();
+	int overflow = check_stack_overflow();
 
 	if (IS_ERR_OR_NULL(desc))
 		return false;
 
-	irq = irq_desc_get_irq(desc);
-	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
+	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
-		generic_handle_irq_desc(irq, desc);
+		generic_handle_irq_desc(desc);
 	}
 
 	return true;
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index ff16ccb..c767cf2 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -75,6 +75,6 @@
 	if (unlikely(IS_ERR_OR_NULL(desc)))
 		return false;
 
-	generic_handle_irq_desc(irq_desc_get_irq(desc), desc);
+	generic_handle_irq_desc(desc);
 	return true;
 }
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 2bcc052..6acc9dd 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -58,7 +58,7 @@
 	if (alloc_size > PAGE_SIZE)
 		new_ldt->entries = vzalloc(alloc_size);
 	else
-		new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
+		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);
 
 	if (!new_ldt->entries) {
 		kfree(new_ldt);
@@ -95,7 +95,7 @@
 	if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
 		vfree(ldt->entries);
 	else
-		kfree(ldt->entries);
+		free_page((unsigned long)ldt->entries);
 	kfree(ldt);
 }
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index f68e48f..c2130ae 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -41,10 +41,18 @@
 #include <asm/timer.h>
 #include <asm/special_insns.h>
 
-/* nop stub */
-void _paravirt_nop(void)
-{
-}
+/*
+ * nop stub, which must not clobber anything *including the stack* to
+ * avoid confusing the entry prologues.
+ */
+extern void _paravirt_nop(void);
+asm (".pushsection .entry.text, \"ax\"\n"
+     ".global _paravirt_nop\n"
+     "_paravirt_nop:\n\t"
+     "ret\n\t"
+     ".size _paravirt_nop, . - _paravirt_nop\n\t"
+     ".type _paravirt_nop, @function\n\t"
+     ".popsection");
 
 /* identity function, which can be inlined */
 u32 _paravirt_ident_32(u32 x)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 84b8ef8..1b55de1 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -131,8 +131,8 @@
 
 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
 {
-	*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
 	*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+	*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
 
 	if (!*dev)
 		*dev = &x86_dma_fallback_dev;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c8d52cb..c3f7602 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -21,6 +21,7 @@
 #include <asm/hypervisor.h>
 #include <asm/nmi.h>
 #include <asm/x86_init.h>
+#include <asm/geode.h>
 
 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -1013,15 +1014,17 @@
 
 static void __init check_system_tsc_reliable(void)
 {
-#ifdef CONFIG_MGEODE_LX
-	/* RTSC counts during suspend */
+#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
+	if (is_geode_lx()) {
+		/* RTSC counts during suspend */
 #define RTSC_SUSP 0x100
-	unsigned long res_low, res_high;
+		unsigned long res_low, res_high;
 
-	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
-	/* Geode_LX - the OLPC CPU has a very reliable TSC */
-	if (res_low & RTSC_SUSP)
-		tsc_clocksource_reliable = 1;
+		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+		/* Geode_LX - the OLPC CPU has a very reliable TSC */
+		if (res_low & RTSC_SUSP)
+			tsc_clocksource_reliable = 1;
+	}
 #endif
 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
 		tsc_clocksource_reliable = 1;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index abd8b856..5246193 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -45,6 +45,7 @@
 #include <linux/audit.h>
 #include <linux/stddef.h>
 #include <linux/slab.h>
+#include <linux/security.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -232,6 +233,32 @@
 	struct pt_regs *regs = current_pt_regs();
 	unsigned long err = 0;
 
+	err = security_mmap_addr(0);
+	if (err) {
+		/*
+		 * vm86 cannot virtualize the address space, so vm86 users
+		 * need to manage the low 1MB themselves using mmap.  Given
+		 * that BIOS places important data in the first page, vm86
+		 * is essentially useless if mmap_min_addr != 0.  DOSEMU,
+		 * for example, won't even bother trying to use vm86 if it
+		 * can't map a page at virtual address 0.
+		 *
+		 * To reduce the available kernel attack surface, simply
+		 * disallow vm86(old) for users who cannot mmap at va 0.
+		 *
+		 * The implementation of security_mmap_addr will allow
+		 * suitably privileged users to map va 0 even if
+		 * vm.mmap_min_addr is set above 0, and we want this
+		 * behavior for vm86 as well, as it ensures that legacy
+		 * tools like vbetool will not fail just because of
+		 * vm.mmap_min_addr.
+		 */
+		pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d).  Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
+			     current->comm, task_pid_nr(current),
+			     from_kuid_munged(&init_user_ns, current_uid()));
+		return -EPERM;
+	}
+
 	if (!vm86) {
 		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
 			return -ENOMEM;
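Illustrative aside, not part of the patch: the security_mmap_addr(0) probe mirrors what a vm86 client has to be able to do anyway, namely map real-mode memory at virtual address 0, which vm.mmap_min_addr normally forbids. A minimal sketch of that dependency (hypothetical standalone program):

#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	void *p = mmap((void *)0, 0x1000, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED) {
		/* Typically EPERM while vm.mmap_min_addr > 0 and the caller
		 * lacks the privilege the LSM demands; vm86(old) is now
		 * refused under the same condition. */
		perror("mmap at va 0");
		return 1;
	}
	return 0;
}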
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 69088a1..ff606f5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3322,7 +3322,7 @@
 			break;
 
 		reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
-						    leaf);
+						    iterator.level);
 	}
 
 	walk_shadow_page_lockless_end(vcpu);
@@ -3614,7 +3614,7 @@
 __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 			struct rsvd_bits_validate *rsvd_check,
 			int maxphyaddr, int level, bool nx, bool gbpages,
-			bool pse)
+			bool pse, bool amd)
 {
 	u64 exb_bit_rsvd = 0;
 	u64 gbpages_bit_rsvd = 0;
@@ -3631,7 +3631,7 @@
 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
 	 * leaf entries) on AMD CPUs only.
 	 */
-	if (guest_cpuid_is_amd(vcpu))
+	if (amd)
 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
 
 	switch (level) {
@@ -3699,7 +3699,7 @@
 	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
 				cpuid_maxphyaddr(vcpu), context->root_level,
 				context->nx, guest_cpuid_has_gbpages(vcpu),
-				is_pse(vcpu));
+				is_pse(vcpu), guest_cpuid_is_amd(vcpu));
 }
 
 static void
@@ -3749,13 +3749,24 @@
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
+	/*
+	 * Passing "true" to the last argument is okay; it adds a check
+	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
+	 */
 	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 				boot_cpu_data.x86_phys_bits,
 				context->shadow_root_level, context->nx,
-				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu));
+				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
+				true);
 }
 EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
 
+static inline bool boot_cpu_is_amd(void)
+{
+	WARN_ON_ONCE(!tdp_enabled);
+	return shadow_x_mask == 0;
+}
+
 /*
  * the direct page table on host, use as much mmu features as
  * possible, however, kvm currently does not do execution-protection.
@@ -3764,11 +3775,11 @@
 reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 				struct kvm_mmu *context)
 {
-	if (guest_cpuid_is_amd(vcpu))
+	if (boot_cpu_is_amd())
 		__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 					boot_cpu_data.x86_phys_bits,
 					context->shadow_root_level, false,
-					cpu_has_gbpages, true);
+					cpu_has_gbpages, true, true);
 	else
 		__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
 					    boot_cpu_data.x86_phys_bits,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index fdb8cb6..2f9ed1f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -202,6 +202,7 @@
 static int nested = true;
 module_param(nested, int, S_IRUGO);
 
+static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
 
@@ -513,7 +514,7 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	if (svm->vmcb->control.next_rip != 0) {
-		WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
+		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
 		svm->next_rip = svm->vmcb->control.next_rip;
 	}
 
@@ -865,64 +866,6 @@
 	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
 }
 
-#define MTRR_TYPE_UC_MINUS	7
-#define MTRR2PROTVAL_INVALID 0xff
-
-static u8 mtrr2protval[8];
-
-static u8 fallback_mtrr_type(int mtrr)
-{
-	/*
-	 * WT and WP aren't always available in the host PAT.  Treat
-	 * them as UC and UC- respectively.  Everything else should be
-	 * there.
-	 */
-	switch (mtrr)
-	{
-	case MTRR_TYPE_WRTHROUGH:
-		return MTRR_TYPE_UNCACHABLE;
-	case MTRR_TYPE_WRPROT:
-		return MTRR_TYPE_UC_MINUS;
-	default:
-		BUG();
-	}
-}
-
-static void build_mtrr2protval(void)
-{
-	int i;
-	u64 pat;
-
-	for (i = 0; i < 8; i++)
-		mtrr2protval[i] = MTRR2PROTVAL_INVALID;
-
-	/* Ignore the invalid MTRR types.  */
-	mtrr2protval[2] = 0;
-	mtrr2protval[3] = 0;
-
-	/*
-	 * Use host PAT value to figure out the mapping from guest MTRR
-	 * values to nested page table PAT/PCD/PWT values.  We do not
-	 * want to change the host PAT value every time we enter the
-	 * guest.
-	 */
-	rdmsrl(MSR_IA32_CR_PAT, pat);
-	for (i = 0; i < 8; i++) {
-		u8 mtrr = pat >> (8 * i);
-
-		if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
-			mtrr2protval[mtrr] = __cm_idx2pte(i);
-	}
-
-	for (i = 0; i < 8; i++) {
-		if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
-			u8 fallback = fallback_mtrr_type(i);
-			mtrr2protval[i] = mtrr2protval[fallback];
-			BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
-		}
-	}
-}
-
 static __init int svm_hardware_setup(void)
 {
 	int cpu;
@@ -989,7 +932,6 @@
 	} else
 		kvm_disable_tdp();
 
-	build_mtrr2protval();
 	return 0;
 
 err:
@@ -1144,43 +1086,6 @@
 	return target_tsc - tsc;
 }
 
-static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
-{
-	struct kvm_vcpu *vcpu = &svm->vcpu;
-
-	/* Unlike Intel, AMD takes the guest's CR0.CD into account.
-	 *
-	 * AMD does not have IPAT.  To emulate it for the case of guests
-	 * with no assigned devices, just set everything to WB.  If guests
-	 * have assigned devices, however, we cannot force WB for RAM
-	 * pages only, so use the guest PAT directly.
-	 */
-	if (!kvm_arch_has_assigned_device(vcpu->kvm))
-		*g_pat = 0x0606060606060606;
-	else
-		*g_pat = vcpu->arch.pat;
-}
-
-static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
-{
-	u8 mtrr;
-
-	/*
-	 * 1. MMIO: trust guest MTRR, so same as item 3.
-	 * 2. No passthrough: always map as WB, and force guest PAT to WB as well
-	 * 3. Passthrough: can't guarantee the result, try to trust guest.
-	 */
-	if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
-		return 0;
-
-	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) &&
-	    kvm_read_cr0(vcpu) & X86_CR0_CD)
-		return _PAGE_NOCACHE;
-
-	mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
-	return mtrr2protval[mtrr];
-}
-
 static void init_vmcb(struct vcpu_svm *svm, bool init_event)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1263,7 +1168,8 @@
 	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
 	 * It also updates the guest-visible cr0 value.
 	 */
-	(void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
+	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
+	kvm_mmu_reset_context(&svm->vcpu);
 
 	save->cr4 = X86_CR4_PAE;
 	/* rdx = ?? */
@@ -1276,7 +1182,6 @@
 		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
 		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
 		save->g_pat = svm->vcpu.arch.pat;
-		svm_set_guest_pat(svm, &save->g_pat);
 		save->cr3 = 0;
 		save->cr4 = 0;
 	}
@@ -1671,10 +1576,13 @@
 
 	if (!vcpu->fpu_active)
 		cr0 |= X86_CR0_TS;
-
-	/* These are emulated via page tables.  */
-	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
-
+	/*
+	 * re-enable caching here because the QEMU bios
+	 * does not do it - this results in some delay at
+	 * reboot
+	 */
+	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+		cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
 	mark_dirty(svm->vmcb, VMCB_CR);
 	update_cr0_intercept(svm);
@@ -3349,16 +3257,6 @@
 	case MSR_VM_IGNNE:
 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
-	case MSR_IA32_CR_PAT:
-		if (npt_enabled) {
-			if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
-				return 1;
-			vcpu->arch.pat = data;
-			svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
-			mark_dirty(svm->vmcb, VMCB_NPT);
-			break;
-		}
-		/* fall through */
 	default:
 		return kvm_set_msr_common(vcpu, msr);
 	}
@@ -4193,6 +4091,11 @@
 	return true;
 }
 
+static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+	return 0;
+}
+
 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 {
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d019868..06ef490 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6064,6 +6064,8 @@
 	memcpy(vmx_msr_bitmap_longmode_x2apic,
 			vmx_msr_bitmap_longmode, PAGE_SIZE);
 
+	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+
 	if (enable_apicv) {
 		for (msr = 0x800; msr <= 0x8ff; msr++)
 			vmx_disable_intercept_msr_read_x2apic(msr);
@@ -8615,17 +8617,22 @@
 	u64 ipat = 0;
 
 	/* For VT-d and EPT combination
-	 * 1. MMIO: guest may want to apply WC, trust it.
+	 * 1. MMIO: always map as UC
 	 * 2. EPT with VT-d:
 	 *   a. VT-d without snooping control feature: can't guarantee the
-	 *	result, try to trust guest.  So the same as item 1.
+	 *	result, try to trust guest.
 	 *   b. VT-d with snooping control feature: snooping control feature of
 	 *	VT-d engine can guarantee the cache correctness. Just set it
 	 *	to WB to keep consistent with host. So the same as item 3.
 	 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
 	 *    consistent with host MTRR
 	 */
-	if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+	if (is_mmio) {
+		cache = MTRR_TYPE_UNCACHABLE;
+		goto exit;
+	}
+
+	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
 		ipat = VMX_EPT_IPAT_BIT;
 		cache = MTRR_TYPE_WRBACK;
 		goto exit;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a60bdbc..92511d4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -149,6 +149,7 @@
 	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
 	{ "halt_exits", VCPU_STAT(halt_exits) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
+	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "hypercalls", VCPU_STAT(hypercalls) },
 	{ "request_irq", VCPU_STAT(request_irq_exits) },
@@ -1707,8 +1708,6 @@
 		vcpu->pvclock_set_guest_stopped_request = false;
 	}
 
-	pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO;
-
 	/* If the host uses TSC clocksource, then it is stable */
 	if (use_master_clock)
 		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
@@ -2006,8 +2005,6 @@
 					&vcpu->requests);
 
 			ka->boot_vcpu_runs_old_kvmclock = tmp;
-
-			ka->kvmclock_offset = -get_kernel_ns();
 		}
 
 		vcpu->arch.time = data;
@@ -2189,6 +2186,8 @@
 	case MSR_IA32_LASTINTFROMIP:
 	case MSR_IA32_LASTINTTOIP:
 	case MSR_K8_SYSCFG:
+	case MSR_K8_TSEG_ADDR:
+	case MSR_K8_TSEG_MASK:
 	case MSR_K7_HWCR:
 	case MSR_VM_HSAVE_PA:
 	case MSR_K8_INT_PENDING_MSG:
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 161804d..a0d09f6 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1015,7 +1015,7 @@
  * This is the Guest timer interrupt handler (hardware interrupt 0).  We just
  * call the clockevent infrastructure and it does whatever needs doing.
  */
-static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
+static void lguest_time_irq(struct irq_desc *desc)
 {
 	unsigned long flags;
 
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 66338a6..c2aea63 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -192,10 +192,11 @@
 
 	node_set(node, numa_nodes_parsed);
 
-	pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s\n",
+	pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n",
 		node, pxm,
 		(unsigned long long) start, (unsigned long long) end - 1,
-		hotpluggable ? " hotplug" : "");
+		hotpluggable ? " hotplug" : "",
+		ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : "");
 
 	/* Mark hotplug range in memblock. */
 	if (hotpluggable && memblock_mark_hotplug(start, ma->length))
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 70efcd0..7599197 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1109,7 +1109,7 @@
 		bpf_flush_icache(header, image + proglen);
 		set_memory_ro((unsigned long)header, header->pages);
 		prog->bpf_func = (void *)image;
-		prog->jited = true;
+		prog->jited = 1;
 	}
 out:
 	kfree(addrs);
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 09d3afc..dc78a4a 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -166,6 +166,7 @@
 {
 	struct pci_dev *dev;
 
+	pci_read_bridge_bases(b);
 	list_for_each_entry(dev, &b->devices, bus_list)
 		pcibios_fixup_device_resources(dev);
 }
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index d27b4dc..b848cc3 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -210,6 +210,10 @@
 
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
+	if (bus->parent) {
+		/* This is a subordinate bridge */
+		pci_read_bridge_bases(bus);
+	}
 }
 
 void pcibios_set_master(struct pci_dev *dev)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 4aecca7..14b8faf 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -140,6 +140,11 @@
 
 	iv = bip->bip_vec + bip->bip_vcnt;
 
+	if (bip->bip_vcnt &&
+	    bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
+			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
+		return 0;
+
 	iv->bv_page = page;
 	iv->bv_len = len;
 	iv->bv_offset = offset;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index ac8370c..55512dd 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -370,6 +370,9 @@
 		blkg_destroy(blkg);
 		spin_unlock(&blkcg->lock);
 	}
+
+	q->root_blkg = NULL;
+	q->root_rl.blkg = NULL;
 }
 
 /*
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index f548b64..75f29cf 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -204,6 +204,9 @@
 	    q->limits.max_integrity_segments)
 		return false;
 
+	if (integrity_req_gap_back_merge(req, next->bio))
+		return false;
+
 	return true;
 }
 EXPORT_SYMBOL(blk_integrity_merge_rq);
diff --git a/block/blk-map.c b/block/blk-map.c
index 2338416..f565e11 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -9,6 +9,24 @@
 
 #include "blk.h"
 
+static bool iovec_gap_to_prv(struct request_queue *q,
+			     struct iovec *prv, struct iovec *cur)
+{
+	unsigned long prev_end;
+
+	if (!queue_virt_boundary(q))
+		return false;
+
+	if (prv->iov_base == NULL && prv->iov_len == 0)
+		/* prv is not set - don't check */
+		return false;
+
+	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
+
+	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
+		prev_end & queue_virt_boundary(q));
+}
+
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio)
 {
@@ -67,7 +85,7 @@
 	struct bio *bio;
 	int unaligned = 0;
 	struct iov_iter i;
-	struct iovec iov;
+	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
 
 	if (!iter || !iter->count)
 		return -EINVAL;
@@ -81,8 +99,12 @@
 		/*
 		 * Keep going so we check length of all segments
 		 */
-		if (uaddr & queue_dma_alignment(q))
+		if ((uaddr & queue_dma_alignment(q)) ||
+		    iovec_gap_to_prv(q, &prv, &iov))
 			unaligned = 1;
+
+		prv.iov_base = iov.iov_base;
+		prv.iov_len = iov.iov_len;
 	}
 
 	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
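Illustrative aside, not part of the patch: with a hypothetical virt boundary mask of 0xfff (a 4K boundary, as NVMe uses), the new helper flags consecutive iovecs like the following as gapped, forcing the copying path instead of a direct mapping the device could not express:

/* Made-up values, assuming queue_virt_boundary(q) == 0xfff. */
struct iovec prv = { .iov_base = (void *)0x10000, .iov_len = 0x200 };	/* ends at 0x10200 */
struct iovec cur = { .iov_base = (void *)0x20000, .iov_len = 0x200 };
/*
 * prev_end & 0xfff == 0x200 != 0, so iovec_gap_to_prv() returns true,
 * "unaligned" is set, and blk_rq_map_user_iov() bounces the data through
 * a copy instead of mapping the user pages directly.
 */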
diff --git a/block/blk-merge.c b/block/blk-merge.c
index d088cff..c4e9c37 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -66,36 +66,33 @@
 					 struct bio *bio,
 					 struct bio_set *bs)
 {
-	struct bio *split;
-	struct bio_vec bv, bvprv;
+	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
 	unsigned seg_size = 0, nsegs = 0, sectors = 0;
-	int prev = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
-		sectors += bv.bv_len >> 9;
-
-		if (sectors > queue_max_sectors(q))
+		if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
 			goto split;
 
 		/*
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
 		 */
-		if (prev && bvec_gap_to_prev(q, &bvprv, bv.bv_offset))
+		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
 			goto split;
 
-		if (prev && blk_queue_cluster(q)) {
+		if (bvprvp && blk_queue_cluster(q)) {
 			if (seg_size + bv.bv_len > queue_max_segment_size(q))
 				goto new_segment;
-			if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
+			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
 				goto new_segment;
-			if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
 				goto new_segment;
 
 			seg_size += bv.bv_len;
 			bvprv = bv;
-			prev = 1;
+			bvprvp = &bv;
+			sectors += bv.bv_len >> 9;
 			continue;
 		}
 new_segment:
@@ -104,23 +101,14 @@
 
 		nsegs++;
 		bvprv = bv;
-		prev = 1;
+		bvprvp = &bv;
 		seg_size = bv.bv_len;
+		sectors += bv.bv_len >> 9;
 	}
 
 	return NULL;
 split:
-	split = bio_clone_bioset(bio, GFP_NOIO, bs);
-
-	split->bi_iter.bi_size -= iter.bi_size;
-	bio->bi_iter = iter;
-
-	if (bio_integrity(bio)) {
-		bio_integrity_advance(bio, split->bi_iter.bi_size);
-		bio_integrity_trim(split, 0, bio_sectors(split));
-	}
-
-	return split;
+	return bio_split(bio, sectors, GFP_NOIO, bs);
 }
 
 void blk_queue_split(struct request_queue *q, struct bio **bio,
@@ -439,6 +427,11 @@
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio)
 {
+	if (req_gap_back_merge(req, bio))
+		return 0;
+	if (blk_integrity_rq(req) &&
+	    integrity_req_gap_back_merge(req, bio))
+		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req)) {
 		req->cmd_flags |= REQ_NOMERGE;
@@ -457,6 +450,12 @@
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
+
+	if (req_gap_front_merge(req, bio))
+		return 0;
+	if (blk_integrity_rq(req) &&
+	    integrity_req_gap_front_merge(req, bio))
+		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req)) {
 		req->cmd_flags |= REQ_NOMERGE;
@@ -483,14 +482,6 @@
 	return !q->mq_ops && req->special;
 }
 
-static int req_gap_to_prev(struct request *req, struct bio *next)
-{
-	struct bio *prev = req->biotail;
-
-	return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
-			next->bi_io_vec[0].bv_offset);
-}
-
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
@@ -505,7 +496,7 @@
 	if (req_no_special_merge(req) || req_no_special_merge(next))
 		return 0;
 
-	if (req_gap_to_prev(req, next->bio))
+	if (req_gap_back_merge(req, next->bio))
 		return 0;
 
 	/*
@@ -713,10 +704,6 @@
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
-	/* Only check gaps if the bio carries data */
-	if (bio_has_data(bio) && req_gap_to_prev(rq, bio))
-		return false;
-
 	return true;
 }
 
diff --git a/block/bounce.c b/block/bounce.c
index 0611aea..1cb5dd3 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -128,12 +128,14 @@
 	struct bio *bio_orig = bio->bi_private;
 	struct bio_vec *bvec, *org_vec;
 	int i;
+	int start = bio_orig->bi_iter.bi_idx;
 
 	/*
 	 * free up bounce indirect pages used
 	 */
 	bio_for_each_segment_all(bvec, bio, i) {
-		org_vec = bio_orig->bi_io_vec + i;
+		org_vec = bio_orig->bi_io_vec + i + start;
+
 		if (bvec->bv_page == org_vec->bv_page)
 			continue;
 
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 6d88dd1..1970966 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -332,10 +332,6 @@
 		srlen = cert->raw_serial_size;
 		q = cert->raw_serial;
 	}
-	if (srlen > 1 && *q == 0) {
-		srlen--;
-		q++;
-	}
 
 	ret = -ENOMEM;
 	desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 35c2de1..fa18753 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -940,6 +940,7 @@
 	char *xbuf[XBUFSIZE];
 	char *xoutbuf[XBUFSIZE];
 	int ret = -ENOMEM;
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
 
 	if (testmgr_alloc_buf(xbuf))
 		goto out_nobuf;
@@ -975,7 +976,7 @@
 			continue;
 
 		if (template[i].iv)
-			memcpy(iv, template[i].iv, MAX_IVLEN);
+			memcpy(iv, template[i].iv, ivsize);
 		else
 			memset(iv, 0, MAX_IVLEN);
 
@@ -1051,7 +1052,7 @@
 			continue;
 
 		if (template[i].iv)
-			memcpy(iv, template[i].iv, MAX_IVLEN);
+			memcpy(iv, template[i].iv, ivsize);
 		else
 			memset(iv, 0, MAX_IVLEN);
 
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 46506e7..a212cef 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -315,14 +315,10 @@
 
 	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
 	capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
-#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
-			defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
-	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
-#endif
-
-#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
-	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
-#endif
+	if (IS_ENABLED(CONFIG_ACPI_PROCESSOR_AGGREGATOR))
+		capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
+	if (IS_ENABLED(CONFIG_ACPI_PROCESSOR))
+		capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
 
 	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
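Illustrative aside, not part of the patch: the same conversion recurs in int340x_thermal.c below. The general idiom, shown here with a hypothetical CONFIG_FOO and do_foo():

/* Old style: a tristate option needs both the built-in and the module symbol. */
#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
	do_foo();
#endif

/* New style: IS_ENABLED(CONFIG_FOO) is 1 for =y or =m and 0 otherwise; the
 * condition is a compile-time constant, so the dead call still vanishes
 * from the object code without an #ifdef block. */
if (IS_ENABLED(CONFIG_FOO))
	do_foo();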
 
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 2614a83..42c66b6 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1044,8 +1044,10 @@
 		goto err_exit;
 
 	mutex_lock(&ec->mutex);
+	result = -ENODATA;
 	list_for_each_entry(handler, &ec->list, node) {
 		if (value == handler->query_bit) {
+			result = 0;
 			q->handler = acpi_ec_get_query_handler(handler);
 			ec_dbg_evt("Query(0x%02x) scheduled",
 				   q->handler->query_bit);
diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c
index 9dcf836..33505c6 100644
--- a/drivers/acpi/int340x_thermal.c
+++ b/drivers/acpi/int340x_thermal.c
@@ -33,13 +33,12 @@
 static int int340x_thermal_handler_attach(struct acpi_device *adev,
 					const struct acpi_device_id *id)
 {
-#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
-	acpi_create_platform_device(adev);
-#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE)
-	/* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
-	if (id->driver_data == INT3401_DEVICE)
+	if (IS_ENABLED(CONFIG_INT340X_THERMAL))
 		acpi_create_platform_device(adev);
-#endif
+	/* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
+	else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
+		 id->driver_data == INT3401_DEVICE)
+		acpi_create_platform_device(adev);
 	return 1;
 }
 
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 6da0f9b..c933675 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -372,6 +372,7 @@
 
 	/* Interrupt Line values above 0xF are forbidden */
 	if (dev->irq > 0 && (dev->irq <= 0xF) &&
+	    acpi_isa_irq_available(dev->irq) &&
 	    (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
 		dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
 			 pin_name(dev->pin), dev->irq);
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 3b4ea98..7c8408b 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -498,8 +498,7 @@
 			    PIRQ_PENALTY_PCI_POSSIBLE;
 		}
 	}
-	/* Add a penalty for the SCI */
-	acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
+
 	return 0;
 }
 
@@ -553,6 +552,13 @@
 				irq = link->irq.possible[i];
 		}
 	}
+	if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
+		printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
+			    "Try pci=noacpi or acpi=off\n",
+			    acpi_device_name(link->device),
+			    acpi_device_bid(link->device));
+		return -ENODEV;
+	}
 
 	/* Attempt to enable the link device at this IRQ. */
 	if (acpi_pci_link_set(link, irq)) {
@@ -821,6 +827,12 @@
 	}
 }
 
+bool acpi_isa_irq_available(int irq)
+{
+	return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
+			    acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
+}
+
 /*
  * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
  * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index fc28b9f..30d8518 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -525,8 +525,7 @@
 
 /* sys I/F for generic thermal sysfs support */
 
-static int thermal_get_temp(struct thermal_zone_device *thermal,
-			    unsigned long *temp)
+static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
 {
 	struct acpi_thermal *tz = thermal->devdata;
 	int result;
@@ -633,7 +632,7 @@
 }
 
 static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
-				 int trip, unsigned long *temp)
+				 int trip, int *temp)
 {
 	struct acpi_thermal *tz = thermal->devdata;
 	int i;
@@ -686,7 +685,8 @@
 }
 
 static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
-				unsigned long *temperature) {
+				int *temperature)
+{
 	struct acpi_thermal *tz = thermal->devdata;
 
 	if (tz->trips.critical.flags.valid) {
@@ -709,8 +709,8 @@
 		return -EINVAL;
 
 	if (type == THERMAL_TRIP_ACTIVE) {
-		unsigned long trip_temp;
-		unsigned long temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+		int trip_temp;
+		int temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
 					tz->temperature, tz->kelvin_offset);
 		if (thermal_get_trip_temp(thermal, trip, &trip_temp))
 			return -EINVAL;
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index a8da3a5..0f5cb37 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1578,9 +1578,7 @@
 
 	kfree(he_dev->rbpl_virt);
 	kfree(he_dev->rbpl_table);
-
-	if (he_dev->rbpl_pool)
-		dma_pool_destroy(he_dev->rbpl_pool);
+	dma_pool_destroy(he_dev->rbpl_pool);
 
 	if (he_dev->rbrq_base)
 		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
@@ -1594,8 +1592,7 @@
 		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
 				  he_dev->tpdrq_base, he_dev->tpdrq_phys);
 
-	if (he_dev->tpd_pool)
-		dma_pool_destroy(he_dev->tpd_pool);
+	dma_pool_destroy(he_dev->tpd_pool);
 
 	if (he_dev->pci_dev) {
 		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
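The he.c cleanup relies on dma_pool_destroy() accepting NULL and simply returning, just as kfree() does, so the surrounding if tests were redundant. A small sketch of a teardown path written that way; the structure and field names are hypothetical:

	static void example_teardown(struct example_dev *dev)
	{
		kfree(dev->rx_table);			/* kfree(NULL) is a no-op */
		dma_pool_destroy(dev->rx_pool);		/* dma_pool_destroy(NULL) is a no-op */
		dma_pool_destroy(dev->tx_pool);
	}
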
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 65e6590..7d00f29 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -112,7 +112,8 @@
 
 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
-   if (!entry) return -1;
+   if (!entry)
+      return -ENOMEM;
    entry->data = data;
    entry->next = NULL;
    if (que->next == NULL) 
@@ -1175,7 +1176,7 @@
         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
            if (vcc->vci < 32)
               printk("Drop control packets\n");
-	      goto out_free_desc;
+	   goto out_free_desc;
         }
 	skb_put(skb,len);  
         // pwang_test
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 74e18b0..3d7fb65 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -805,7 +805,12 @@
 					continue;
 				}
 
-				skb = alloc_skb(size + 1, GFP_ATOMIC);
+				/* Use netdev_alloc_skb() because it adds NET_SKB_PAD of
+				 * headroom, and ensures we can route packets back out an
+				 * Ethernet interface (for example) without having to
+				 * reallocate. Adding NET_IP_ALIGN also ensures that both
+				 * PPPoATM and PPPoEoBR2684 packets end up aligned. */
+				skb = netdev_alloc_skb_ip_align(NULL, size + 1);
 				if (!skb) {
 					if (net_ratelimit())
 						dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n");
@@ -869,7 +874,10 @@
 		/* Allocate RX skbs for any ports which need them */
 		if (card->using_dma && card->atmdev[port] &&
 		    !card->rx_skb[port]) {
-			struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC);
+			/* Unlike the MMIO case (qv) we can't add NET_IP_ALIGN
+			 * here; the FPGA can only DMA to addresses which are
+			 * aligned to 4 bytes. */
+			struct sk_buff *skb = dev_alloc_skb(RX_DMA_SIZE);
 			if (skb) {
 				SKB_CB(skb)->dma_addr =
 					dma_map_single(&card->dev->dev, skb->data,
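As the solos-pci comments explain, netdev_alloc_skb_ip_align() gives the skb NET_SKB_PAD of headroom plus NET_IP_ALIGN bytes so the IP header lands aligned, while the DMA path keeps a plain dev_alloc_skb() because the FPGA can only DMA to 4-byte-aligned buffers. Roughly, and only as a simplified sketch rather than the exact mainline implementation, the helper amounts to:

	static struct sk_buff *example_rx_alloc(unsigned int size)
	{
		/* reserve driver headroom plus IP alignment up front */
		struct sk_buff *skb = alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
						GFP_ATOMIC);

		if (skb)
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
		return skb;
	}
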
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 764280a..e9fd32e 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -148,7 +148,11 @@
 
 			if (sibling == cpu) /* skip itself */
 				continue;
+
 			sib_cpu_ci = get_cpu_cacheinfo(sibling);
+			if (!sib_cpu_ci->info_list)
+				continue;
+
 			sib_leaf = sib_cpu_ci->info_list + index;
 			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
 			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
@@ -159,6 +163,9 @@
 
 static void free_cache_attributes(unsigned int cpu)
 {
+	if (!per_cpu_cacheinfo(cpu))
+		return;
+
 	cache_shared_cpu_map_remove(cpu);
 
 	kfree(per_cpu_cacheinfo(cpu));
@@ -514,8 +521,7 @@
 		break;
 	case CPU_DEAD:
 		cache_remove_dev(cpu);
-		if (per_cpu_cacheinfo(cpu))
-			free_cache_attributes(cpu);
+		free_cache_attributes(cpu);
 		break;
 	}
 	return notifier_from_errno(rc);
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 1857a5d..134483d 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -63,20 +63,8 @@
 			     unsigned int virq, irq_hw_number_t hwirq,
 			     msi_alloc_info_t *arg)
 {
-	struct irq_data *data;
-
-	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
-				      info->chip, info->chip_data);
-
-	/*
-	 * Save the MSI descriptor in handler_data so that the
-	 * irq_write_msi_msg callback can retrieve it (and the
-	 * associated device).
-	 */
-	data = irq_domain_get_irq_data(domain, virq);
-	data->handler_data = arg->desc;
-
-	return 0;
+	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+					     info->chip, info->chip_data);
 }
 #else
 #define platform_msi_set_desc		NULL
@@ -97,7 +85,7 @@
 
 static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
 {
-	struct msi_desc *desc = irq_data_get_irq_handler_data(data);
+	struct msi_desc *desc = irq_data_get_msi_desc(data);
 	struct platform_msi_priv_data *priv_data;
 
 	priv_data = desc->platform.msi_priv_data;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 4167201..16550c6 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -213,6 +213,18 @@
 }
 
 /**
+ * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of pm_genpd_poweroff() unless it's already been done
+ * before.
+ */
+static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+	queue_work(pm_wq, &genpd->power_off_work);
+}
+
+/**
  * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
  *
@@ -259,8 +271,12 @@
 	return 0;
 
  err:
-	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+	list_for_each_entry_continue_reverse(link,
+					&genpd->slave_links,
+					slave_node) {
 		genpd_sd_counter_dec(link->master);
+		genpd_queue_power_off_work(link->master);
+	}
 
 	return ret;
 }
@@ -349,18 +365,6 @@
 }
 
 /**
- * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
- * @genpd: PM domait to power off.
- *
- * Queue up the execution of pm_genpd_poweroff() unless it's already been done
- * before.
- */
-static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
-{
-	queue_work(pm_wq, &genpd->power_off_work);
-}
-
-/**
  * pm_genpd_poweroff - Remove power from a given PM domain.
  * @genpd: PM domain to power down.
  *
@@ -1469,6 +1473,13 @@
 
 	mutex_lock(&genpd->lock);
 
+	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
+		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
+			subdomain->name);
+		ret = -EBUSY;
+		goto out;
+	}
+
 	list_for_each_entry(link, &genpd->master_links, master_node) {
 		if (link->slave != subdomain)
 			continue;
@@ -1487,6 +1498,7 @@
 		break;
 	}
 
+out:
 	mutex_unlock(&genpd->lock);
 
 	return ret;
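The __pm_genpd_poweron() error path now also queues a power-off for each master whose subdomain count it decrements, so a failed power-up leaves the masters as if the slave had never been powered. The underlying unwind idiom, shown as a generic sketch in which item, head, do_prepare() and do_undo() are placeholders:

	static int example_prepare_all(struct list_head *head)
	{
		struct example_entry *item;
		int ret;

		list_for_each_entry(item, head, node) {
			ret = do_prepare(item);
			if (ret)
				goto err;
		}
		return 0;

	err:
		/* Walk back over the entries already prepared, newest first */
		list_for_each_entry_continue_reverse(item, head, node)
			do_undo(item);
		return ret;
	}
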
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index eb25449..7ae7cd9 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -341,6 +341,34 @@
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
 
 /**
+ * dev_pm_opp_get_suspend_opp() - Get suspend opp
+ * @dev:	device for which we do this operation
+ *
+ * Return: This function returns pointer to the suspend opp if it is
+ * defined and available, otherwise it returns NULL.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+{
+	struct device_opp *dev_opp;
+
+	opp_rcu_lockdep_assert();
+
+	dev_opp = _find_device_opp(dev);
+	if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
+	    !dev_opp->suspend_opp->available)
+		return NULL;
+
+	return dev_opp->suspend_opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
+
+/**
  * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
  * @dev:	device for which we do this operation
  *
@@ -864,10 +892,17 @@
 	u32 microvolt[3] = {0};
 	int count, ret;
 
-	count = of_property_count_u32_elems(opp->np, "opp-microvolt");
-	if (!count)
+	/* Missing property isn't a problem, but an invalid entry is */
+	if (!of_find_property(opp->np, "opp-microvolt", NULL))
 		return 0;
 
+	count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+	if (count < 0) {
+		dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
+			__func__, count);
+		return count;
+	}
+
 	/* There can be one or three elements here */
 	if (count != 1 && count != 3) {
 		dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
@@ -1035,7 +1070,7 @@
  * share a common logic which is isolated here.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
@@ -1123,7 +1158,7 @@
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  */
 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
@@ -1149,7 +1184,7 @@
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  */
 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
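The kernel-doc for dev_pm_opp_get_suspend_opp() above requires callers to hold rcu_read_lock() for as long as they dereference the returned OPP. A hedged sketch of a caller that wants the suspend frequency (a cpufreq-style user is assumed here):

	struct dev_pm_opp *opp;
	unsigned long suspend_freq = 0;

	rcu_read_lock();
	opp = dev_pm_opp_get_suspend_opp(dev);
	if (opp)
		suspend_freq = dev_pm_opp_get_freq(opp);
	rcu_read_unlock();	/* the opp pointer must not be used after this */
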
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index cc55788..628ad7a 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -98,6 +98,8 @@
 
 	int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
 	int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+	int (*reg_update_bits)(void *context, unsigned int reg,
+			       unsigned int mask, unsigned int val);
 
 	bool defer_caching;
 
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index afaf562..8cd155a 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -619,6 +619,7 @@
 		goto skip_format_initialization;
 	} else {
 		map->reg_read  = _regmap_bus_read;
+		map->reg_update_bits = bus->reg_update_bits;
 	}
 
 	reg_endian = regmap_get_reg_endian(bus, config);
@@ -2509,20 +2510,26 @@
 	int ret;
 	unsigned int tmp, orig;
 
-	ret = _regmap_read(map, reg, &orig);
-	if (ret != 0)
-		return ret;
+	if (change)
+		*change = false;
 
-	tmp = orig & ~mask;
-	tmp |= val & mask;
-
-	if (force_write || (tmp != orig)) {
-		ret = _regmap_write(map, reg, tmp);
-		if (change)
+	if (regmap_volatile(map, reg) && map->reg_update_bits) {
+		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
+		if (ret == 0 && change)
 			*change = true;
 	} else {
-		if (change)
-			*change = false;
+		ret = _regmap_read(map, reg, &orig);
+		if (ret != 0)
+			return ret;
+
+		tmp = orig & ~mask;
+		tmp |= val & mask;
+
+		if (force_write || (tmp != orig)) {
+			ret = _regmap_write(map, reg, tmp);
+			if (ret == 0 && change)
+				*change = true;
+		}
 	}
 
 	return ret;
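With a bus-level reg_update_bits callback wired up, regmap can hand read-modify-write of volatile registers to hardware that has a native update operation instead of doing read, mask, write itself. From a driver's point of view nothing changes; the usual call is still the one below, where EXAMPLE_REG and EXAMPLE_EN_MASK are placeholder names:

	/* Set the enable bits and leave the rest untouched. The core either
	 * performs read/modify/write or forwards to bus->reg_update_bits. */
	ret = regmap_update_bits(map, EXAMPLE_REG, EXAMPLE_EN_MASK, EXAMPLE_EN_MASK);
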
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 17269a3..a295b98 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -406,6 +406,22 @@
 	.complete	= null_softirq_done_fn,
 };
 
+static void cleanup_queue(struct nullb_queue *nq)
+{
+	kfree(nq->tag_map);
+	kfree(nq->cmds);
+}
+
+static void cleanup_queues(struct nullb *nullb)
+{
+	int i;
+
+	for (i = 0; i < nullb->nr_queues; i++)
+		cleanup_queue(&nullb->queues[i]);
+
+	kfree(nullb->queues);
+}
+
 static void null_del_dev(struct nullb *nullb)
 {
 	list_del_init(&nullb->list);
@@ -415,6 +431,7 @@
 	if (queue_mode == NULL_Q_MQ)
 		blk_mq_free_tag_set(&nullb->tag_set);
 	put_disk(nullb->disk);
+	cleanup_queues(nullb);
 	kfree(nullb);
 }
 
@@ -459,22 +476,6 @@
 	return 0;
 }
 
-static void cleanup_queue(struct nullb_queue *nq)
-{
-	kfree(nq->tag_map);
-	kfree(nq->cmds);
-}
-
-static void cleanup_queues(struct nullb *nullb)
-{
-	int i;
-
-	for (i = 0; i < nullb->nr_queues; i++)
-		cleanup_queue(&nullb->queues[i]);
-
-	kfree(nullb->queues);
-}
-
 static int setup_queues(struct nullb *nullb)
 {
 	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
@@ -588,8 +589,7 @@
 	blk_queue_physical_block_size(nullb->q, bs);
 
 	size = gb * 1024 * 1024 * 1024ULL;
-	sector_div(size, bs);
-	set_capacity(disk, size);
+	set_capacity(disk, size >> 9);
 
 	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
 	disk->major		= null_major;
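set_capacity() takes the size in 512-byte sectors, so the null_blk change shifts the byte size right by 9; the earlier sector_div(size, bs) produced units of the logical block size, which only matches 512-byte sectors when bs happens to be 512. A worked example for a 2 GiB device:

	/* 2 GiB backing size: 2 * 1024^3 = 2147483648 bytes,
	 * 2147483648 >> 9 = 4194304 sectors of 512 bytes each. */
	set_capacity(disk, (2ULL << 30) >> 9);
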
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 698f761..d93a037 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4673,7 +4673,10 @@
 	}
 
 	ret = rbd_dev_v2_snap_context(rbd_dev);
-	dout("rbd_dev_v2_snap_context returned %d\n", ret);
+	if (ret && first_time) {
+		kfree(rbd_dev->header.object_prefix);
+		rbd_dev->header.object_prefix = NULL;
+	}
 
 	return ret;
 }
@@ -5154,7 +5157,6 @@
 out_err:
 	if (parent) {
 		rbd_dev_unparent(rbd_dev);
-		kfree(rbd_dev->header_name);
 		rbd_dev_destroy(parent);
 	} else {
 		rbd_put_client(rbdc);
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 965d1af..5cb13ca 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -330,12 +330,14 @@
  * allocate new zcomp and initialize it. return compressing
  * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
  * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
- * case of allocation error.
+ * case of allocation error, or any other error potentially
+ * returned by functions zcomp_strm_{multi,single}_create.
  */
 struct zcomp *zcomp_create(const char *compress, int max_strm)
 {
 	struct zcomp *comp;
 	struct zcomp_backend *backend;
+	int error;
 
 	backend = find_backend(compress);
 	if (!backend)
@@ -347,12 +349,12 @@
 
 	comp->backend = backend;
 	if (max_strm > 1)
-		zcomp_strm_multi_create(comp, max_strm);
+		error = zcomp_strm_multi_create(comp, max_strm);
 	else
-		zcomp_strm_single_create(comp);
-	if (!comp->stream) {
+		error = zcomp_strm_single_create(comp);
+	if (error) {
 		kfree(comp);
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR(error);
 	}
 	return comp;
 }
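zcomp_create() now propagates whatever error the stream-setup helpers return instead of collapsing every failure to -ENOMEM. The ERR_PTR/PTR_ERR convention it relies on, as a generic sketch in which thing_setup() is a hypothetical helper that may fail with any -errno:

	struct thing *thing_create(void)
	{
		struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);
		int error;

		if (!t)
			return ERR_PTR(-ENOMEM);

		error = thing_setup(t);
		if (error) {
			kfree(t);
			return ERR_PTR(error);	/* pass the real reason up */
		}
		return t;
	}

	/* caller */
	struct thing *t = thing_create();
	if (IS_ERR(t))
		return PTR_ERR(t);
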
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 0bd88c9..c9c5dd0 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -4,6 +4,7 @@
 
 config BT_INTEL
 	tristate
+	select REGMAP
 
 config BT_BCM
 	tristate
@@ -183,6 +184,7 @@
 config BT_HCIBPA10X
 	tristate "HCI BPA10x USB driver"
 	depends on USB
+	select BT_HCIUART_H4
 	help
 	  Bluetooth HCI BPA10x USB driver.
 	  This driver provides support for the Digianswer BPA 100/105 Bluetooth
@@ -275,7 +277,7 @@
 	  The core driver to support Marvell Bluetooth devices.
 
 	  This driver is required if you want to support
-	  Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897.
+	  Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8997.
 
 	  Say Y here to compile Marvell Bluetooth driver
 	  into the kernel or say M to compile it as module.
@@ -289,7 +291,7 @@
 	  The driver for Marvell Bluetooth chipsets with SDIO interface.
 
 	  This driver is required if you want to use Marvell Bluetooth
-	  devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897
+	  devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8997
 	  chipsets are supported.
 
 	  Say Y here to compile support for Marvell BT-over-SDIO driver
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index a5c4d05..616ec2a 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -422,17 +422,12 @@
 
 	BT_DBG("hdev %p bfusb %p", hdev, data);
 
-	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	write_lock_irqsave(&data->lock, flags);
 
 	err = bfusb_rx_submit(data, NULL);
 	if (!err) {
 		for (i = 1; i < BFUSB_MAX_BULK_RX; i++)
 			bfusb_rx_submit(data, NULL);
-	} else {
-		clear_bit(HCI_RUNNING, &hdev->flags);
 	}
 
 	write_unlock_irqrestore(&data->lock, flags);
@@ -458,9 +453,6 @@
 
 	BT_DBG("hdev %p bfusb %p", hdev, data);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	write_lock_irqsave(&data->lock, flags);
 	write_unlock_irqrestore(&data->lock, flags);
 
@@ -479,9 +471,6 @@
 
 	BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		hdev->stat.cmd_tx++;
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 35e63aa..36fa1c9 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -390,7 +390,7 @@
 	for (i = 0; i < len; i++) {
 
 		/* Allocate packet */
-		if (info->rx_skb == NULL) {
+		if (!info->rx_skb) {
 			info->rx_state = RECV_WAIT_PACKET_TYPE;
 			info->rx_count = 0;
 			info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
@@ -628,9 +628,6 @@
 	if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
 		bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
 
-	if (test_and_set_bit(HCI_RUNNING, &(hdev->flags)))
-		return 0;
-
 	if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
 		unsigned int iobase = info->p_dev->resource[0]->start;
 
@@ -646,9 +643,6 @@
 {
 	struct bluecard_info *info = hci_get_drvdata(hdev);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
-		return 0;
-
 	bluecard_hci_flush(hdev);
 
 	if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 8a31991..49c397e2 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -35,7 +35,9 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-#define VERSION "0.10"
+#include "hci_uart.h"
+
+#define VERSION "0.11"
 
 static const struct usb_device_id bpa10x_table[] = {
 	/* Tektronix BPA 100/105 (Digianswer) */
@@ -56,112 +58,6 @@
 	struct sk_buff *rx_skb[2];
 };
 
-#define HCI_VENDOR_HDR_SIZE 5
-
-struct hci_vendor_hdr {
-	__u8    type;
-	__le16  snum;
-	__le16  dlen;
-} __packed;
-
-static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
-{
-	struct bpa10x_data *data = hci_get_drvdata(hdev);
-
-	BT_DBG("%s queue %d buffer %p count %d", hdev->name,
-							queue, buf, count);
-
-	if (queue < 0 || queue > 1)
-		return -EILSEQ;
-
-	hdev->stat.byte_rx += count;
-
-	while (count) {
-		struct sk_buff *skb = data->rx_skb[queue];
-		struct { __u8 type; int expect; } *scb;
-		int type, len = 0;
-
-		if (!skb) {
-			/* Start of the frame */
-
-			type = *((__u8 *) buf);
-			count--; buf++;
-
-			switch (type) {
-			case HCI_EVENT_PKT:
-				if (count >= HCI_EVENT_HDR_SIZE) {
-					struct hci_event_hdr *h = buf;
-					len = HCI_EVENT_HDR_SIZE + h->plen;
-				} else
-					return -EILSEQ;
-				break;
-
-			case HCI_ACLDATA_PKT:
-				if (count >= HCI_ACL_HDR_SIZE) {
-					struct hci_acl_hdr *h = buf;
-					len = HCI_ACL_HDR_SIZE +
-							__le16_to_cpu(h->dlen);
-				} else
-					return -EILSEQ;
-				break;
-
-			case HCI_SCODATA_PKT:
-				if (count >= HCI_SCO_HDR_SIZE) {
-					struct hci_sco_hdr *h = buf;
-					len = HCI_SCO_HDR_SIZE + h->dlen;
-				} else
-					return -EILSEQ;
-				break;
-
-			case HCI_VENDOR_PKT:
-				if (count >= HCI_VENDOR_HDR_SIZE) {
-					struct hci_vendor_hdr *h = buf;
-					len = HCI_VENDOR_HDR_SIZE +
-							__le16_to_cpu(h->dlen);
-				} else
-					return -EILSEQ;
-				break;
-			}
-
-			skb = bt_skb_alloc(len, GFP_ATOMIC);
-			if (!skb) {
-				BT_ERR("%s no memory for packet", hdev->name);
-				return -ENOMEM;
-			}
-
-			data->rx_skb[queue] = skb;
-
-			scb = (void *) skb->cb;
-			scb->type = type;
-			scb->expect = len;
-		} else {
-			/* Continuation */
-
-			scb = (void *) skb->cb;
-			len = scb->expect;
-		}
-
-		len = min(len, count);
-
-		memcpy(skb_put(skb, len), buf, len);
-
-		scb->expect -= len;
-
-		if (scb->expect == 0) {
-			/* Complete frame */
-
-			data->rx_skb[queue] = NULL;
-
-			bt_cb(skb)->pkt_type = scb->type;
-			hci_recv_frame(hdev, skb);
-		}
-
-		count -= len; buf += len;
-	}
-
-	return 0;
-}
-
 static void bpa10x_tx_complete(struct urb *urb)
 {
 	struct sk_buff *skb = urb->context;
@@ -184,6 +80,22 @@
 	kfree_skb(skb);
 }
 
+#define HCI_VENDOR_HDR_SIZE 5
+
+#define HCI_RECV_VENDOR \
+	.type = HCI_VENDOR_PKT, \
+	.hlen = HCI_VENDOR_HDR_SIZE, \
+	.loff = 3, \
+	.lsize = 2, \
+	.maxlen = HCI_MAX_FRAME_SIZE
+
+static const struct h4_recv_pkt bpa10x_recv_pkts[] = {
+	{ H4_RECV_ACL,     .recv = hci_recv_frame },
+	{ H4_RECV_SCO,     .recv = hci_recv_frame },
+	{ H4_RECV_EVENT,   .recv = hci_recv_frame },
+	{ HCI_RECV_VENDOR, .recv = hci_recv_diag  },
+};
+
 static void bpa10x_rx_complete(struct urb *urb)
 {
 	struct hci_dev *hdev = urb->context;
@@ -197,11 +109,17 @@
 		return;
 
 	if (urb->status == 0) {
-		if (bpa10x_recv(hdev, usb_pipebulk(urb->pipe),
+		bool idx = usb_pipebulk(urb->pipe);
+
+		data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
 						urb->transfer_buffer,
-						urb->actual_length) < 0) {
+						urb->actual_length,
+						bpa10x_recv_pkts,
+						ARRAY_SIZE(bpa10x_recv_pkts));
+		if (IS_ERR(data->rx_skb[idx])) {
 			BT_ERR("%s corrupted event packet", hdev->name);
 			hdev->stat.err_rx++;
+			data->rx_skb[idx] = NULL;
 		}
 	}
 
@@ -304,9 +222,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	err = bpa10x_submit_intr_urb(hdev);
 	if (err < 0)
 		goto error;
@@ -320,8 +235,6 @@
 error:
 	usb_kill_anchored_urbs(&data->rx_anchor);
 
-	clear_bit(HCI_RUNNING, &hdev->flags);
-
 	return err;
 }
 
@@ -331,9 +244,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	usb_kill_anchored_urbs(&data->rx_anchor);
 
 	return 0;
@@ -350,6 +260,24 @@
 	return 0;
 }
 
+static int bpa10x_setup(struct hci_dev *hdev)
+{
+	const u8 req[] = { 0x07 };
+	struct sk_buff *skb;
+
+	BT_DBG("%s", hdev->name);
+
+	/* Read revision string */
+	skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+
+	kfree_skb(skb);
+	return 0;
+}
+
 static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct bpa10x_data *data = hci_get_drvdata(hdev);
@@ -360,9 +288,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	skb->dev = (void *) hdev;
 
 	urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -431,6 +356,25 @@
 	return 0;
 }
 
+static int bpa10x_set_diag(struct hci_dev *hdev, bool enable)
+{
+	const u8 req[] = { 0x00, enable };
+	struct sk_buff *skb;
+
+	BT_DBG("%s", hdev->name);
+
+	if (!test_bit(HCI_RUNNING, &hdev->flags))
+		return -ENETDOWN;
+
+	/* Enable sniffer operation */
+	skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	kfree_skb(skb);
+	return 0;
+}
+
 static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
 	struct bpa10x_data *data;
@@ -465,7 +409,9 @@
 	hdev->open     = bpa10x_open;
 	hdev->close    = bpa10x_close;
 	hdev->flush    = bpa10x_flush;
+	hdev->setup    = bpa10x_setup;
 	hdev->send     = bpa10x_send_frame;
+	hdev->set_diag = bpa10x_set_diag;
 
 	set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
 
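The rewritten bpa10x RX path feeds raw USB data into h4_recv_buf(), which reassembles frames according to a table of h4_recv_pkt descriptors; each entry names the packet-type byte, the fixed header length, and where inside that header the payload-length field sits. An annotated descriptor as a sketch, with the 0xff type and the sizes made up purely for illustration:

	static const struct h4_recv_pkt example_recv_pkts[] = {
		{ H4_RECV_ACL,   .recv = hci_recv_frame },	/* standard ACL descriptor */
		{ .type   = 0xff,			/* packet indicator byte */
		  .hlen   = 5,				/* fixed header length */
		  .loff   = 3,				/* offset of the length field */
		  .lsize  = 2,				/* length field is 16 bit */
		  .maxlen = HCI_MAX_FRAME_SIZE,		/* sanity limit for a frame */
		  .recv   = hci_recv_frame },
	};
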
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index a00bb82..5803aae 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -233,7 +233,7 @@
 		info->hdev->stat.byte_rx++;
 
 		/* Allocate packet */
-		if (info->rx_skb == NULL) {
+		if (!info->rx_skb) {
 			info->rx_state = RECV_WAIT_PACKET_TYPE;
 			info->rx_count = 0;
 			info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
@@ -270,7 +270,6 @@
 				/* Unknown packet */
 				BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
 				info->hdev->stat.err_rx++;
-				clear_bit(HCI_RUNNING, &(info->hdev->flags));
 
 				kfree_skb(info->rx_skb);
 				info->rx_skb = NULL;
@@ -395,17 +394,12 @@
 
 static int bt3c_hci_open(struct hci_dev *hdev)
 {
-	set_bit(HCI_RUNNING, &(hdev->flags));
-
 	return 0;
 }
 
 
 static int bt3c_hci_close(struct hci_dev *hdev)
 {
-	if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
-		return 0;
-
 	bt3c_hci_flush(hdev);
 
 	return 0;
@@ -453,7 +447,8 @@
 {
 	char *ptr = (char *) firmware;
 	char b[9];
-	unsigned int iobase, size, addr, fcs, tmp;
+	unsigned int iobase, tmp;
+	unsigned long size, addr, fcs;
 	int i, err = 0;
 
 	iobase = info->p_dev->resource[0]->start;
@@ -478,15 +473,18 @@
 
 		memset(b, 0, sizeof(b));
 		memcpy(b, ptr + 2, 2);
-		size = simple_strtoul(b, NULL, 16);
+		if (kstrtoul(b, 16, &size) < 0)
+			return -EINVAL;
 
 		memset(b, 0, sizeof(b));
 		memcpy(b, ptr + 4, 8);
-		addr = simple_strtoul(b, NULL, 16);
+		if (kstrtoul(b, 16, &addr) < 0)
+			return -EINVAL;
 
 		memset(b, 0, sizeof(b));
 		memcpy(b, ptr + (size * 2) + 2, 2);
-		fcs = simple_strtoul(b, NULL, 16);
+		if (kstrtoul(b, 16, &fcs) < 0)
+			return -EINVAL;
 
 		memset(b, 0, sizeof(b));
 		for (tmp = 0, i = 0; i < size; i++) {
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 02ed816..2fc363a 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -181,6 +181,27 @@
 	return 0;
 }
 
+static struct sk_buff *btbcm_read_local_name(struct hci_dev *hdev)
+{
+	struct sk_buff *skb;
+
+	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL,
+			     HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: BCM: Reading local name failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return skb;
+	}
+
+	if (skb->len != sizeof(struct hci_rp_read_local_name)) {
+		BT_ERR("%s: BCM: Local name length mismatch", hdev->name);
+		kfree_skb(skb);
+		return ERR_PTR(-EIO);
+	}
+
+	return skb;
+}
+
 static struct sk_buff *btbcm_read_local_version(struct hci_dev *hdev)
 {
 	struct sk_buff *skb;
@@ -393,6 +414,14 @@
 	BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
 	kfree_skb(skb);
 
+	/* Read Local Name */
+	skb = btbcm_read_local_name(hdev);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+	kfree_skb(skb);
+
 	switch ((rev & 0xf000) >> 12) {
 	case 0:
 	case 3:
@@ -464,6 +493,14 @@
 		hw_name ? : "BCM", (subver & 0x7000) >> 13,
 		(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
 
+	/* Read Local Name */
+	skb = btbcm_read_local_name(hdev);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+	kfree_skb(skb);
+
 	btbcm_check_bdaddr(hdev);
 
 	set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
@@ -475,12 +512,25 @@
 int btbcm_setup_apple(struct hci_dev *hdev)
 {
 	struct sk_buff *skb;
+	int err;
+
+	/* Reset */
+	err = btbcm_reset(hdev);
+	if (err)
+		return err;
 
 	/* Read Verbose Config Version Info */
 	skb = btbcm_read_verbose_config(hdev);
 	if (!IS_ERR(skb)) {
-		BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
-			get_unaligned_le16(skb->data + 5));
+		BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name,
+			skb->data[1], get_unaligned_le16(skb->data + 5));
+		kfree_skb(skb);
+	}
+
+	/* Read Local Name */
+	skb = btbcm_read_local_name(hdev);
+	if (!IS_ERR(skb)) {
+		BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
 		kfree_skb(skb);
 	}
 
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 048423f..7047fe6 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -22,6 +22,8 @@
  */
 
 #include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/regmap.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -169,6 +171,246 @@
 }
 EXPORT_SYMBOL_GPL(btintel_secure_send);
 
+int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name)
+{
+	const struct firmware *fw;
+	struct sk_buff *skb;
+	const u8 *fw_ptr;
+	int err;
+
+	err = request_firmware_direct(&fw, ddc_name, &hdev->dev);
+	if (err < 0) {
+		bt_dev_err(hdev, "Failed to load Intel DDC file %s (%d)",
+			   ddc_name, err);
+		return err;
+	}
+
+	bt_dev_info(hdev, "Found Intel DDC parameters: %s", ddc_name);
+
+	fw_ptr = fw->data;
+
+	/* DDC file contains one or more DDC structures which have
+	 * Length (1 byte), DDC ID (2 bytes), and DDC value (Length - 2).
+	 */
+	while (fw->size > fw_ptr - fw->data) {
+		u8 cmd_plen = fw_ptr[0] + sizeof(u8);
+
+		skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr,
+				     HCI_INIT_TIMEOUT);
+		if (IS_ERR(skb)) {
+			bt_dev_err(hdev, "Failed to send Intel_Write_DDC (%ld)",
+				   PTR_ERR(skb));
+			release_firmware(fw);
+			return PTR_ERR(skb);
+		}
+
+		fw_ptr += cmd_plen;
+		kfree_skb(skb);
+	}
+
+	release_firmware(fw);
+
+	bt_dev_info(hdev, "Applying Intel DDC parameters completed");
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_load_ddc_config);
+
+/* ------- REGMAP IBT SUPPORT ------- */
+
+#define IBT_REG_MODE_8BIT  0x00
+#define IBT_REG_MODE_16BIT 0x01
+#define IBT_REG_MODE_32BIT 0x02
+
+struct regmap_ibt_context {
+	struct hci_dev *hdev;
+	__u16 op_write;
+	__u16 op_read;
+};
+
+struct ibt_cp_reg_access {
+	__le32  addr;
+	__u8    mode;
+	__u8    len;
+	__u8    data[0];
+} __packed;
+
+struct ibt_rp_reg_access {
+	__u8    status;
+	__le32  addr;
+	__u8    data[0];
+} __packed;
+
+static int regmap_ibt_read(void *context, const void *addr, size_t reg_size,
+			   void *val, size_t val_size)
+{
+	struct regmap_ibt_context *ctx = context;
+	struct ibt_cp_reg_access cp;
+	struct ibt_rp_reg_access *rp;
+	struct sk_buff *skb;
+	int err = 0;
+
+	if (reg_size != sizeof(__le32))
+		return -EINVAL;
+
+	switch (val_size) {
+	case 1:
+		cp.mode = IBT_REG_MODE_8BIT;
+		break;
+	case 2:
+		cp.mode = IBT_REG_MODE_16BIT;
+		break;
+	case 4:
+		cp.mode = IBT_REG_MODE_32BIT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* regmap provides a little-endian formatted addr */
+	cp.addr = *(__le32 *)addr;
+	cp.len = val_size;
+
+	bt_dev_dbg(ctx->hdev, "Register (0x%x) read", le32_to_cpu(cp.addr));
+
+	skb = hci_cmd_sync(ctx->hdev, ctx->op_read, sizeof(cp), &cp,
+			   HCI_CMD_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error (%d)",
+			   le32_to_cpu(cp.addr), err);
+		return err;
+	}
+
+	if (skb->len != sizeof(*rp) + val_size) {
+		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad len",
+			   le32_to_cpu(cp.addr));
+		err = -EINVAL;
+		goto done;
+	}
+
+	rp = (struct ibt_rp_reg_access *)skb->data;
+
+	if (rp->addr != cp.addr) {
+		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad addr",
+			   le32_to_cpu(rp->addr));
+		err = -EINVAL;
+		goto done;
+	}
+
+	memcpy(val, rp->data, val_size);
+
+done:
+	kfree_skb(skb);
+	return err;
+}
+
+static int regmap_ibt_gather_write(void *context,
+				   const void *addr, size_t reg_size,
+				   const void *val, size_t val_size)
+{
+	struct regmap_ibt_context *ctx = context;
+	struct ibt_cp_reg_access *cp;
+	struct sk_buff *skb;
+	int plen = sizeof(*cp) + val_size;
+	u8 mode;
+	int err = 0;
+
+	if (reg_size != sizeof(__le32))
+		return -EINVAL;
+
+	switch (val_size) {
+	case 1:
+		mode = IBT_REG_MODE_8BIT;
+		break;
+	case 2:
+		mode = IBT_REG_MODE_16BIT;
+		break;
+	case 4:
+		mode = IBT_REG_MODE_32BIT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	cp = kmalloc(plen, GFP_KERNEL);
+	if (!cp)
+		return -ENOMEM;
+
+	/* regmap provides a little-endian formatted addr/value */
+	cp->addr = *(__le32 *)addr;
+	cp->mode = mode;
+	cp->len = val_size;
+	memcpy(&cp->data, val, val_size);
+
+	bt_dev_dbg(ctx->hdev, "Register (0x%x) write", le32_to_cpu(cp->addr));
+
+	skb = hci_cmd_sync(ctx->hdev, ctx->op_write, plen, cp, HCI_CMD_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) write error (%d)",
+			   le32_to_cpu(cp->addr), err);
+		goto done;
+	}
+	kfree_skb(skb);
+
+done:
+	kfree(cp);
+	return err;
+}
+
+static int regmap_ibt_write(void *context, const void *data, size_t count)
+{
+	/* data contains register+value, since we only support 32bit addr,
+	 * minimum data size is 4 bytes.
+	 */
+	if (WARN_ONCE(count < 4, "Invalid register access"))
+		return -EINVAL;
+
+	return regmap_ibt_gather_write(context, data, 4, data + 4, count - 4);
+}
+
+static void regmap_ibt_free_context(void *context)
+{
+	kfree(context);
+}
+
+static struct regmap_bus regmap_ibt = {
+	.read = regmap_ibt_read,
+	.write = regmap_ibt_write,
+	.gather_write = regmap_ibt_gather_write,
+	.free_context = regmap_ibt_free_context,
+	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+/* Config is the same for all register regions */
+static const struct regmap_config regmap_ibt_cfg = {
+	.name      = "btintel_regmap",
+	.reg_bits  = 32,
+	.val_bits  = 32,
+};
+
+struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
+				   u16 opcode_write)
+{
+	struct regmap_ibt_context *ctx;
+
+	bt_dev_info(hdev, "regmap: Init R%x-W%x region", opcode_read,
+		    opcode_write);
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	ctx->op_read = opcode_read;
+	ctx->op_write = opcode_write;
+	ctx->hdev = hdev;
+
+	return regmap_init(&hdev->dev, &regmap_ibt, ctx, &regmap_ibt_cfg);
+}
+EXPORT_SYMBOL_GPL(btintel_regmap_init);
+
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
 MODULE_VERSION(VERSION);
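btintel_regmap_init() wraps a pair of vendor HCI commands as a regmap bus, so once the map is created, controller registers can be accessed with the ordinary regmap API. A hedged usage sketch; the opcodes and the register address below are placeholders, not the controller's actual vendor commands:

	struct regmap *regmap;
	unsigned int val;

	regmap = btintel_regmap_init(hdev, 0xfc8c /* read op */,
				     0xfc8d /* write op */);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* 32-bit register access tunnelled over the two vendor commands */
	regmap_read(regmap, 0x00008000, &val);
	regmap_write(regmap, 0x00008000, val | 0x1);
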
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index b278d14..f0655c4 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -78,6 +78,10 @@
 void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
 int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
 			const void *param);
+int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name);
+
+struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
+				   u16 opcode_write);
 
 #else
 
@@ -95,7 +99,8 @@
 {
 }
 
-static void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+static inline void btintel_version_info(struct hci_dev *hdev,
+					struct intel_version *ver)
 {
 }
 
@@ -105,4 +110,16 @@
 	return -EOPNOTSUPP;
 }
 
+static inline int btintel_load_ddc_config(struct hci_dev *hdev,
+					  const char *ddc_name)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev,
+						 u16 opcode_read,
+						 u16 opcode_write)
+{
+	return ERR_PTR(-EINVAL);
+}
 #endif
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index de05deb..6ba2286 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -184,7 +184,7 @@
 	}
 
 	skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
-	if (skb == NULL) {
+	if (!skb) {
 		BT_ERR("No free skb");
 		return -ENOMEM;
 	}
@@ -377,20 +377,6 @@
 		return -EINVAL;
 	}
 
-	if (skb_headroom(skb) < BTM_HEADER_LEN) {
-		struct sk_buff *tmp = skb;
-
-		skb = skb_realloc_headroom(skb, BTM_HEADER_LEN);
-		if (!skb) {
-			BT_ERR("Tx Error: realloc_headroom failed %d",
-				BTM_HEADER_LEN);
-			skb = tmp;
-			return -EINVAL;
-		}
-
-		kfree_skb(tmp);
-	}
-
 	skb_push(skb, BTM_HEADER_LEN);
 
 	/* header type: byte[3]
@@ -450,13 +436,6 @@
 
 	BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
-		BT_ERR("Failed testing HCI_RUNING, flags=%lx", hdev->flags);
-		print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET,
-							skb->data, skb->len);
-		return -EBUSY;
-	}
-
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		hdev->stat.cmd_tx++;
@@ -491,9 +470,6 @@
 {
 	struct btmrvl_private *priv = hci_get_drvdata(hdev);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	skb_queue_purge(&priv->adapter->tx_queue);
 
 	return 0;
@@ -501,8 +477,6 @@
 
 static int btmrvl_open(struct hci_dev *hdev)
 {
-	set_bit(HCI_RUNNING, &hdev->flags);
-
 	return 0;
 }
 
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index b9978a7..71ea2a3 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -146,6 +146,29 @@
 	.fw_dump_end = 0xea,
 };
 
+static const struct btmrvl_sdio_card_reg btmrvl_reg_8997 = {
+	.cfg = 0x00,
+	.host_int_mask = 0x08,
+	.host_intstatus = 0x0c,
+	.card_status = 0x5c,
+	.sq_read_base_addr_a0 = 0xf8,
+	.sq_read_base_addr_a1 = 0xf9,
+	.card_revision = 0xc8,
+	.card_fw_status0 = 0xe8,
+	.card_fw_status1 = 0xe9,
+	.card_rx_len = 0xea,
+	.card_rx_unit = 0xeb,
+	.io_port_0 = 0xe4,
+	.io_port_1 = 0xe5,
+	.io_port_2 = 0xe6,
+	.int_read_to_clear = true,
+	.host_int_rsr = 0x04,
+	.card_misc_cfg = 0xD8,
+	.fw_dump_ctrl = 0xf0,
+	.fw_dump_start = 0xf1,
+	.fw_dump_end = 0xf8,
+};
+
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
 	.helper		= "mrvl/sd8688_helper.bin",
 	.firmware	= "mrvl/sd8688.bin",
@@ -191,25 +214,37 @@
 	.supports_fw_dump = true,
 };
 
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
+	.helper         = NULL,
+	.firmware       = "mrvl/sd8997_uapsta.bin",
+	.reg            = &btmrvl_reg_8997,
+	.support_pscan_win_report = true,
+	.sd_blksz_fw_dl = 256,
+	.supports_fw_dump = true,
+};
+
 static const struct sdio_device_id btmrvl_sdio_ids[] = {
 	/* Marvell SD8688 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
-			.driver_data = (unsigned long) &btmrvl_sdio_sd8688 },
+			.driver_data = (unsigned long)&btmrvl_sdio_sd8688 },
 	/* Marvell SD8787 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
-			.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+			.driver_data = (unsigned long)&btmrvl_sdio_sd8787 },
 	/* Marvell SD8787 Bluetooth AMP device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B),
-			.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+			.driver_data = (unsigned long)&btmrvl_sdio_sd8787 },
 	/* Marvell SD8797 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
-			.driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
+			.driver_data = (unsigned long)&btmrvl_sdio_sd8797 },
 	/* Marvell SD8887 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9136),
 			.driver_data = (unsigned long)&btmrvl_sdio_sd8887 },
 	/* Marvell SD8897 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E),
-			.driver_data = (unsigned long) &btmrvl_sdio_sd8897 },
+			.driver_data = (unsigned long)&btmrvl_sdio_sd8897 },
+	/* Marvell SD8997 Bluetooth device */
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9142),
+			.driver_data = (unsigned long)&btmrvl_sdio_sd8997 },
 
 	{ }	/* Terminating entry */
 };
@@ -619,7 +654,7 @@
 
 	/* Allocate buffer */
 	skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
-	if (skb == NULL) {
+	if (!skb) {
 		BT_ERR("No free skb");
 		ret = -ENOMEM;
 		goto exit;
@@ -1278,6 +1313,12 @@
 
 		if (memory_size == 0) {
 			BT_INFO("Firmware dump finished!");
+			sdio_writeb(card->func, FW_DUMP_READ_DONE,
+				    card->reg->fw_dump_ctrl, &ret);
+			if (ret) {
+				BT_ERR("SDIO Write MEMDUMP_FINISH ERR");
+				goto done;
+			}
 			break;
 		}
 
@@ -1616,3 +1657,4 @@
 MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
 MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
 MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 83f6437..7b62442 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -194,21 +194,15 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	sdio_claim_host(data->func);
 
 	err = sdio_enable_func(data->func);
-	if (err < 0) {
-		clear_bit(HCI_RUNNING, &hdev->flags);
+	if (err < 0)
 		goto release;
-	}
 
 	err = sdio_claim_irq(data->func, btsdio_interrupt);
 	if (err < 0) {
 		sdio_disable_func(data->func);
-		clear_bit(HCI_RUNNING, &hdev->flags);
 		goto release;
 	}
 
@@ -229,9 +223,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	sdio_claim_host(data->func);
 
 	sdio_writeb(data->func, 0x00, REG_EN_INTRD, NULL);
@@ -261,9 +252,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		hdev->stat.cmd_tx++;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index abb4d21..bb8e402 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -38,7 +38,7 @@
 #include <linux/serial.h>
 #include <linux/serial_reg.h>
 #include <linux/bitops.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <pcmcia/cistpl.h>
 #include <pcmcia/ciscode.h>
@@ -188,7 +188,7 @@
 		info->hdev->stat.byte_rx++;
 
 		/* Allocate packet */
-		if (info->rx_skb == NULL) {
+		if (!info->rx_skb) {
 			info->rx_state = RECV_WAIT_PACKET_TYPE;
 			info->rx_count = 0;
 			info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
@@ -223,7 +223,6 @@
 				/* Unknown packet */
 				BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
 				info->hdev->stat.err_rx++;
-				clear_bit(HCI_RUNNING, &(info->hdev->flags));
 
 				kfree_skb(info->rx_skb);
 				info->rx_skb = NULL;
@@ -409,17 +408,12 @@
 
 static int btuart_hci_open(struct hci_dev *hdev)
 {
-	set_bit(HCI_RUNNING, &(hdev->flags));
-
 	return 0;
 }
 
 
 static int btuart_hci_close(struct hci_dev *hdev)
 {
-	if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
-		return 0;
-
 	btuart_hci_flush(hdev);
 
 	return 0;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index b6aceaf..247b106 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -940,9 +940,6 @@
 
 	data->intf->needs_remote_wakeup = 1;
 
-	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
-		goto done;
-
 	if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
 		goto done;
 
@@ -965,7 +962,6 @@
 
 failed:
 	clear_bit(BTUSB_INTR_RUNNING, &data->flags);
-	clear_bit(HCI_RUNNING, &hdev->flags);
 	usb_autopm_put_interface(data->intf);
 	return err;
 }
@@ -984,9 +980,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	cancel_work_sync(&data->work);
 	cancel_work_sync(&data->waker);
 
@@ -1156,9 +1149,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		urb = alloc_ctrl_urb(hdev, skb);
@@ -1277,6 +1267,20 @@
 			clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
 			usb_kill_anchored_urbs(&data->isoc_anchor);
 
+			/* When isochronous alternate setting needs to be
+			 * changed, because SCO connection has been added
+			 * or removed, a packet fragment may be left in the
+			 * reassembling state. This could lead to wrongly
+			 * assembled fragments.
+			 *
+			 * Clear outstanding fragment when selecting a new
+			 * alternate setting.
+			 */
+			spin_lock(&data->rxlock);
+			kfree_skb(data->sco_skb);
+			data->sco_skb = NULL;
+			spin_unlock(&data->rxlock);
+
 			if (__set_isoc_interface(hdev, new_alts) < 0)
 				return;
 		}
@@ -1348,7 +1352,9 @@
 
 	rp = (struct hci_rp_read_local_version *)skb->data;
 
-	if (le16_to_cpu(rp->manufacturer) != 10) {
+	/* Detect controllers which aren't real CSR ones. */
+	if (le16_to_cpu(rp->manufacturer) != 10 ||
+	    le16_to_cpu(rp->lmp_subver) == 0x0c5c) {
 		/* Clear the reset quirk since this is not an actual
 		 * early Bluetooth 1.1 device from CSR.
 		 */
@@ -1827,9 +1833,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {
@@ -2217,36 +2220,7 @@
 	 * The device can work without DDC parameters, so even if it fails
 	 * to load the file, no need to fail the setup.
 	 */
-	err = request_firmware_direct(&fw, fwname, &hdev->dev);
-	if (err < 0)
-		return 0;
-
-	BT_INFO("%s: Found Intel DDC parameters: %s", hdev->name, fwname);
-
-	fw_ptr = fw->data;
-
-	/* DDC file contains one or more DDC structure which has
-	 * Length (1 byte), DDC ID (2 bytes), and DDC value (Length - 2).
-	 */
-	while (fw->size > fw_ptr - fw->data) {
-		u8 cmd_plen = fw_ptr[0] + sizeof(u8);
-
-		skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr,
-				     HCI_INIT_TIMEOUT);
-		if (IS_ERR(skb)) {
-			BT_ERR("%s: Failed to send Intel_Write_DDC (%ld)",
-			       hdev->name, PTR_ERR(skb));
-			release_firmware(fw);
-			return PTR_ERR(skb);
-		}
-
-		fw_ptr += cmd_plen;
-		kfree_skb(skb);
-	}
-
-	release_firmware(fw);
-
-	BT_INFO("%s: Applying Intel DDC parameters completed", hdev->name);
+	btintel_load_ddc_config(hdev, fwname);
 
 	return 0;
 }
@@ -2782,7 +2756,7 @@
 			set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
 
 		/* Fake CSR devices with broken commands */
-		if (bcdDevice <= 0x100)
+		if (bcdDevice <= 0x100 || bcdDevice == 0x134)
 			hdev->setup = btusb_setup_csr;
 
 		set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 7a722df..57eb935 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -155,9 +155,6 @@
 
 	BT_DBG("%s %p", hdev->name, hdev);
 
-	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	/* provide contexts for callbacks from ST */
 	hst = hci_get_drvdata(hdev);
 
@@ -181,7 +178,6 @@
 			goto done;
 
 		if (err != -EINPROGRESS) {
-			clear_bit(HCI_RUNNING, &hdev->flags);
 			BT_ERR("st_register failed %d", err);
 			return err;
 		}
@@ -195,7 +191,6 @@
 			(&hst->wait_reg_completion,
 			 msecs_to_jiffies(BT_REGISTER_TIMEOUT));
 		if (!timeleft) {
-			clear_bit(HCI_RUNNING, &hdev->flags);
 			BT_ERR("Timeout(%d sec),didn't get reg "
 					"completion signal from ST",
 					BT_REGISTER_TIMEOUT / 1000);
@@ -205,7 +200,6 @@
 		/* Is ST registration callback
 		 * called with ERROR status? */
 		if (hst->reg_status != 0) {
-			clear_bit(HCI_RUNNING, &hdev->flags);
 			BT_ERR("ST registration completed with invalid "
 					"status %d", hst->reg_status);
 			return -EAGAIN;
@@ -215,7 +209,6 @@
 		hst->st_write = ti_st_proto[i].write;
 		if (!hst->st_write) {
 			BT_ERR("undefined ST write function");
-			clear_bit(HCI_RUNNING, &hdev->flags);
 			for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
 				/* Undo registration with ST */
 				err = st_unregister(&ti_st_proto[i]);
@@ -236,9 +229,6 @@
 	int err, i;
 	struct ti_st *hst = hci_get_drvdata(hdev);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
 		err = st_unregister(&ti_st_proto[i]);
 		if (err)
@@ -256,9 +246,6 @@
 	struct ti_st *hst;
 	long len;
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	hst = hci_get_drvdata(hdev);
 
 	/* Prepend skb with frame type */
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 84135c5..5026f66 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -357,8 +357,6 @@
 
 static int dtl1_hci_open(struct hci_dev *hdev)
 {
-	set_bit(HCI_RUNNING, &(hdev->flags));
-
 	return 0;
 }
 
@@ -376,9 +374,6 @@
 
 static int dtl1_hci_close(struct hci_dev *hdev)
 {
-	if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
-		return 0;
-
 	dtl1_hci_flush(hdev);
 
 	return 0;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 835bfab..645e66e 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -31,6 +31,9 @@
 #include <linux/clk.h>
 #include <linux/gpio/consumer.h>
 #include <linux/tty.h>
+#include <linux/interrupt.h>
+#include <linux/dmi.h>
+#include <linux/pm_runtime.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -38,6 +41,11 @@
 #include "btbcm.h"
 #include "hci_uart.h"
 
+#define BCM_LM_DIAG_PKT 0x07
+#define BCM_LM_DIAG_SIZE 63
+
+#define BCM_AUTOSUSPEND_DELAY	5000 /* default autosleep delay */
+
 struct bcm_device {
 	struct list_head	list;
 
@@ -51,8 +59,10 @@
 	bool			clk_enabled;
 
 	u32			init_speed;
+	int			irq;
+	u8			irq_polarity;
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 	struct hci_uart		*hu;
 	bool			is_suspended; /* suspend/resume flag */
 #endif
@@ -66,7 +76,7 @@
 };
 
 /* List of BCM BT UART devices */
-static DEFINE_SPINLOCK(bcm_device_lock);
+static DEFINE_MUTEX(bcm_device_lock);
 static LIST_HEAD(bcm_device_list);
 
 static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
@@ -80,7 +90,7 @@
 
 		clock.type = BCM_UART_CLOCK_48MHZ;
 
-		BT_DBG("%s: Set Controller clock (%d)", hdev->name, clock.type);
+		bt_dev_dbg(hdev, "Set Controller clock (%d)", clock.type);
 
 		/* This Broadcom specific command changes the UART's controller
 		 * clock for baud rate > 3000000.
@@ -88,15 +98,15 @@
 		skb = __hci_cmd_sync(hdev, 0xfc45, 1, &clock, HCI_INIT_TIMEOUT);
 		if (IS_ERR(skb)) {
 			int err = PTR_ERR(skb);
-			BT_ERR("%s: BCM: failed to write clock command (%d)",
-			       hdev->name, err);
+			bt_dev_err(hdev, "BCM: failed to write clock (%d)",
+				   err);
 			return err;
 		}
 
 		kfree_skb(skb);
 	}
 
-	BT_DBG("%s: Set Controller UART speed to %d bit/s", hdev->name, speed);
+	bt_dev_dbg(hdev, "Set Controller UART speed to %d bit/s", speed);
 
 	param.zero = cpu_to_le16(0);
 	param.baud_rate = cpu_to_le32(speed);
@@ -108,8 +118,8 @@
 			     HCI_INIT_TIMEOUT);
 	if (IS_ERR(skb)) {
 		int err = PTR_ERR(skb);
-		BT_ERR("%s: BCM: failed to write update baudrate command (%d)",
-		       hdev->name, err);
+		bt_dev_err(hdev, "BCM: failed to write update baudrate (%d)",
+			   err);
 		return err;
 	}
 
@@ -149,12 +159,125 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static irqreturn_t bcm_host_wake(int irq, void *data)
+{
+	struct bcm_device *bdev = data;
+
+	bt_dev_dbg(bdev, "Host wake IRQ");
+
+	pm_runtime_get(&bdev->pdev->dev);
+	pm_runtime_mark_last_busy(&bdev->pdev->dev);
+	pm_runtime_put_autosuspend(&bdev->pdev->dev);
+
+	return IRQ_HANDLED;
+}
+
+static int bcm_request_irq(struct bcm_data *bcm)
+{
+	struct bcm_device *bdev = bcm->dev;
+	int err = 0;
+
+	/* If this is not a platform device, do not enable PM functionalities */
+	mutex_lock(&bcm_device_lock);
+	if (!bcm_device_exists(bdev)) {
+		err = -ENODEV;
+		goto unlock;
+	}
+
+	if (bdev->irq > 0) {
+		err = devm_request_irq(&bdev->pdev->dev, bdev->irq,
+				       bcm_host_wake, IRQF_TRIGGER_RISING,
+				       "host_wake", bdev);
+		if (err)
+			goto unlock;
+
+		device_init_wakeup(&bdev->pdev->dev, true);
+
+		pm_runtime_set_autosuspend_delay(&bdev->pdev->dev,
+						 BCM_AUTOSUSPEND_DELAY);
+		pm_runtime_use_autosuspend(&bdev->pdev->dev);
+		pm_runtime_set_active(&bdev->pdev->dev);
+		pm_runtime_enable(&bdev->pdev->dev);
+	}
+
+unlock:
+	mutex_unlock(&bcm_device_lock);
+
+	return err;
+}
+
+static const struct bcm_set_sleep_mode default_sleep_params = {
+	.sleep_mode = 1,	/* 0=Disabled, 1=UART, 2=Reserved, 3=USB */
+	.idle_host = 2,		/* idle threshold HOST, in 300ms */
+	.idle_dev = 2,		/* idle threshold device, in 300ms */
+	.bt_wake_active = 1,	/* BT_WAKE active mode: 1 = high, 0 = low */
+	.host_wake_active = 0,	/* HOST_WAKE active mode: 1 = high, 0 = low */
+	.allow_host_sleep = 1,	/* Allow host sleep in SCO flag */
+	.combine_modes = 1,	/* Combine sleep and LPM flag */
+	.tristate_control = 0,	/* Allow tri-state control of UART tx flag */
+	/* Irrelevant USB flags */
+	.usb_auto_sleep = 0,
+	.usb_resume_timeout = 0,
+	.pulsed_host_wake = 0,
+	.break_to_host = 0
+};
+
+static int bcm_setup_sleep(struct hci_uart *hu)
+{
+	struct bcm_data *bcm = hu->priv;
+	struct sk_buff *skb;
+	struct bcm_set_sleep_mode sleep_params = default_sleep_params;
+
+	sleep_params.host_wake_active = !bcm->dev->irq_polarity;
+
+	skb = __hci_cmd_sync(hu->hdev, 0xfc27, sizeof(sleep_params),
+			     &sleep_params, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		int err = PTR_ERR(skb);
+		bt_dev_err(hu->hdev, "Sleep VSC failed (%d)", err);
+		return err;
+	}
+	kfree_skb(skb);
+
+	bt_dev_dbg(hu->hdev, "Set Sleep Parameters VSC succeeded");
+
+	return 0;
+}
+#else
+static inline int bcm_request_irq(struct bcm_data *bcm) { return 0; }
+static inline int bcm_setup_sleep(struct hci_uart *hu) { return 0; }
+#endif
+
+static int bcm_set_diag(struct hci_dev *hdev, bool enable)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct bcm_data *bcm = hu->priv;
+	struct sk_buff *skb;
+
+	if (!test_bit(HCI_RUNNING, &hdev->flags))
+		return -ENETDOWN;
+
+	skb = bt_skb_alloc(3, GFP_KERNEL);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	*skb_put(skb, 1) = BCM_LM_DIAG_PKT;
+	*skb_put(skb, 1) = 0xf0;
+	*skb_put(skb, 1) = enable;
+
+	skb_queue_tail(&bcm->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	return 0;
+}
+
 static int bcm_open(struct hci_uart *hu)
 {
 	struct bcm_data *bcm;
 	struct list_head *p;
 
-	BT_DBG("hu %p", hu);
+	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
 	bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
 	if (!bcm)
@@ -164,7 +287,7 @@
 
 	hu->priv = bcm;
 
-	spin_lock(&bcm_device_lock);
+	mutex_lock(&bcm_device_lock);
 	list_for_each(p, &bcm_device_list) {
 		struct bcm_device *dev = list_entry(p, struct bcm_device, list);
 
@@ -175,17 +298,15 @@
 		if (hu->tty->dev->parent == dev->pdev->dev.parent) {
 			bcm->dev = dev;
 			hu->init_speed = dev->init_speed;
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 			dev->hu = hu;
 #endif
+			bcm_gpio_set_power(bcm->dev, true);
 			break;
 		}
 	}
 
-	if (bcm->dev)
-		bcm_gpio_set_power(bcm->dev, true);
-
-	spin_unlock(&bcm_device_lock);
+	mutex_unlock(&bcm_device_lock);
 
 	return 0;
 }
@@ -193,18 +314,27 @@
 static int bcm_close(struct hci_uart *hu)
 {
 	struct bcm_data *bcm = hu->priv;
+	struct bcm_device *bdev = bcm->dev;
 
-	BT_DBG("hu %p", hu);
+	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
 	/* Protect bcm->dev against removal of the device or driver */
-	spin_lock(&bcm_device_lock);
-	if (bcm_device_exists(bcm->dev)) {
-		bcm_gpio_set_power(bcm->dev, false);
-#ifdef CONFIG_PM_SLEEP
-		bcm->dev->hu = NULL;
+	mutex_lock(&bcm_device_lock);
+	if (bcm_device_exists(bdev)) {
+		bcm_gpio_set_power(bdev, false);
+#ifdef CONFIG_PM
+		pm_runtime_disable(&bdev->pdev->dev);
+		pm_runtime_set_suspended(&bdev->pdev->dev);
+
+		if (device_can_wakeup(&bdev->pdev->dev)) {
+			devm_free_irq(&bdev->pdev->dev, bdev->irq, bdev);
+			device_init_wakeup(&bdev->pdev->dev, false);
+		}
+
+		bdev->hu = NULL;
 #endif
 	}
-	spin_unlock(&bcm_device_lock);
+	mutex_unlock(&bcm_device_lock);
 
 	skb_queue_purge(&bcm->txq);
 	kfree_skb(bcm->rx_skb);
@@ -218,7 +348,7 @@
 {
 	struct bcm_data *bcm = hu->priv;
 
-	BT_DBG("hu %p", hu);
+	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
 	skb_queue_purge(&bcm->txq);
 
@@ -227,13 +357,15 @@
 
 static int bcm_setup(struct hci_uart *hu)
 {
+	struct bcm_data *bcm = hu->priv;
 	char fw_name[64];
 	const struct firmware *fw;
 	unsigned int speed;
 	int err;
 
-	BT_DBG("hu %p", hu);
+	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
+	hu->hdev->set_diag = bcm_set_diag;
 	hu->hdev->set_bdaddr = btbcm_set_bdaddr;
 
 	err = btbcm_initialize(hu->hdev, fw_name, sizeof(fw_name));
@@ -242,13 +374,13 @@
 
 	err = request_firmware(&fw, fw_name, &hu->hdev->dev);
 	if (err < 0) {
-		BT_INFO("%s: BCM: Patch %s not found", hu->hdev->name, fw_name);
+		bt_dev_info(hu->hdev, "BCM: Patch %s not found", fw_name);
 		return 0;
 	}
 
 	err = btbcm_patchram(hu->hdev, fw);
 	if (err) {
-		BT_INFO("%s: BCM: Patch failed (%d)", hu->hdev->name, err);
+		bt_dev_info(hu->hdev, "BCM: Patch failed (%d)", err);
 		goto finalize;
 	}
 
@@ -281,14 +413,28 @@
 	release_firmware(fw);
 
 	err = btbcm_finalize(hu->hdev);
+	if (err)
+		return err;
+
+	err = bcm_request_irq(bcm);
+	if (!err)
+		err = bcm_setup_sleep(hu);
 
 	return err;
 }
 
+#define BCM_RECV_LM_DIAG \
+	.type = BCM_LM_DIAG_PKT, \
+	.hlen = BCM_LM_DIAG_SIZE, \
+	.loff = 0, \
+	.lsize = 0, \
+	.maxlen = BCM_LM_DIAG_SIZE
+
 static const struct h4_recv_pkt bcm_recv_pkts[] = {
-	{ H4_RECV_ACL,   .recv = hci_recv_frame },
-	{ H4_RECV_SCO,   .recv = hci_recv_frame },
-	{ H4_RECV_EVENT, .recv = hci_recv_frame },
+	{ H4_RECV_ACL,      .recv = hci_recv_frame },
+	{ H4_RECV_SCO,      .recv = hci_recv_frame },
+	{ H4_RECV_EVENT,    .recv = hci_recv_frame },
+	{ BCM_RECV_LM_DIAG, .recv = hci_recv_diag  },
 };
 
 static int bcm_recv(struct hci_uart *hu, const void *data, int count)
@@ -302,9 +448,18 @@
 				  bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));
 	if (IS_ERR(bcm->rx_skb)) {
 		int err = PTR_ERR(bcm->rx_skb);
-		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
 		bcm->rx_skb = NULL;
 		return err;
+	} else if (!bcm->rx_skb) {
+		/* Delay auto-suspend when receiving completed packet */
+		mutex_lock(&bcm_device_lock);
+		if (bcm->dev && bcm_device_exists(bcm->dev)) {
+			pm_runtime_get(&bcm->dev->pdev->dev);
+			pm_runtime_mark_last_busy(&bcm->dev->pdev->dev);
+			pm_runtime_put_autosuspend(&bcm->dev->pdev->dev);
+		}
+		mutex_unlock(&bcm_device_lock);
 	}
 
 	return count;
@@ -314,7 +469,7 @@
 {
 	struct bcm_data *bcm = hu->priv;
 
-	BT_DBG("hu %p skb %p", hu, skb);
+	bt_dev_dbg(hu->hdev, "hu %p skb %p", hu, skb);
 
 	/* Prepend skb with frame type */
 	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -326,39 +481,105 @@
 static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
 {
 	struct bcm_data *bcm = hu->priv;
+	struct sk_buff *skb = NULL;
+	struct bcm_device *bdev = NULL;
 
-	return skb_dequeue(&bcm->txq);
+	mutex_lock(&bcm_device_lock);
+
+	if (bcm_device_exists(bcm->dev)) {
+		bdev = bcm->dev;
+		pm_runtime_get_sync(&bdev->pdev->dev);
+		/* Shall be resumed here */
+	}
+
+	skb = skb_dequeue(&bcm->txq);
+
+	if (bdev) {
+		pm_runtime_mark_last_busy(&bdev->pdev->dev);
+		pm_runtime_put_autosuspend(&bdev->pdev->dev);
+	}
+
+	mutex_unlock(&bcm_device_lock);
+
+	return skb;
 }
 
-#ifdef CONFIG_PM_SLEEP
-/* Platform suspend callback */
-static int bcm_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int bcm_suspend_device(struct device *dev)
 {
 	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
 
-	BT_DBG("suspend (%p): is_suspended %d", bdev, bdev->is_suspended);
+	bt_dev_dbg(bdev, "");
 
-	spin_lock(&bcm_device_lock);
-
-	if (!bdev->hu)
-		goto unlock;
-
-	if (!bdev->is_suspended) {
+	if (!bdev->is_suspended && bdev->hu) {
 		hci_uart_set_flow_control(bdev->hu, true);
 
-		/* Once this callback returns, driver suspends BT via GPIO */
+		/* Once this returns, driver suspends BT via GPIO */
 		bdev->is_suspended = true;
 	}
 
 	/* Suspend the device */
 	if (bdev->device_wakeup) {
 		gpiod_set_value(bdev->device_wakeup, false);
-		BT_DBG("suspend, delaying 15 ms");
+		bt_dev_dbg(bdev, "suspend, delaying 15 ms");
 		mdelay(15);
 	}
 
+	return 0;
+}
+
+static int bcm_resume_device(struct device *dev)
+{
+	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+	bt_dev_dbg(bdev, "");
+
+	if (bdev->device_wakeup) {
+		gpiod_set_value(bdev->device_wakeup, true);
+		bt_dev_dbg(bdev, "resume, delaying 15 ms");
+		mdelay(15);
+	}
+
+	/* When this executes, the device has woken up already */
+	if (bdev->is_suspended && bdev->hu) {
+		bdev->is_suspended = false;
+
+		hci_uart_set_flow_control(bdev->hu, false);
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+/* Platform suspend callback */
+static int bcm_suspend(struct device *dev)
+{
+	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+	int error;
+
+	bt_dev_dbg(bdev, "suspend: is_suspended %d", bdev->is_suspended);
+
+	/* bcm_suspend can be called at any time as long as the platform device
+	 * is bound, so it must hold bcm_device_lock to protect access to the
+	 * hci_uart and the device wake-up GPIO.
+	 */
+	mutex_lock(&bcm_device_lock);
+
+	if (!bdev->hu)
+		goto unlock;
+
+	if (pm_runtime_active(dev))
+		bcm_suspend_device(dev);
+
+	if (device_may_wakeup(&bdev->pdev->dev)) {
+		error = enable_irq_wake(bdev->irq);
+		if (!error)
+			bt_dev_dbg(bdev, "BCM irq: enabled");
+	}
+
 unlock:
-	spin_unlock(&bcm_device_lock);
+	mutex_unlock(&bcm_device_lock);
 
 	return 0;
 }
@@ -368,28 +589,30 @@
 {
 	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
 
-	BT_DBG("resume (%p): is_suspended %d", bdev, bdev->is_suspended);
+	bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended);
 
-	spin_lock(&bcm_device_lock);
+	/* bcm_resume can be called at any time as long as the platform device
+	 * is bound, so it must hold bcm_device_lock to protect access to the
+	 * hci_uart and the device wake-up GPIO.
+	 */
+	mutex_lock(&bcm_device_lock);
 
 	if (!bdev->hu)
 		goto unlock;
 
-	if (bdev->device_wakeup) {
-		gpiod_set_value(bdev->device_wakeup, true);
-		BT_DBG("resume, delaying 15 ms");
-		mdelay(15);
+	if (device_may_wakeup(&bdev->pdev->dev)) {
+		disable_irq_wake(bdev->irq);
+		bt_dev_dbg(bdev, "BCM irq: disabled");
 	}
 
-	/* When this callback executes, the device has woken up already */
-	if (bdev->is_suspended) {
-		bdev->is_suspended = false;
-
-		hci_uart_set_flow_control(bdev->hu, false);
-	}
+	bcm_resume_device(dev);
 
 unlock:
-	spin_unlock(&bcm_device_lock);
+	mutex_unlock(&bcm_device_lock);
+
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
 
 	return 0;
 }
@@ -397,24 +620,59 @@
 
 static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false };
 static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };
+static const struct acpi_gpio_params host_wakeup_gpios = { 2, 0, false };
 
 static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
 	{ "device-wakeup-gpios", &device_wakeup_gpios, 1 },
 	{ "shutdown-gpios", &shutdown_gpios, 1 },
+	{ "host-wakeup-gpios", &host_wakeup_gpios, 1 },
 	{ },
 };
 
 #ifdef CONFIG_ACPI
+static u8 acpi_active_low = ACPI_ACTIVE_LOW;
+
+/* Some chipsets define the IRQ polarity incorrectly in the ACPI table. */
+static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
+	{
+		.ident = "Asus T100TA",
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR,
+					"ASUSTeK COMPUTER INC."),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+		},
+		.driver_data = &acpi_active_low,
+	},
+	{ }
+};
+
 static int bcm_resource(struct acpi_resource *ares, void *data)
 {
 	struct bcm_device *dev = data;
+	struct acpi_resource_extended_irq *irq;
+	struct acpi_resource_gpio *gpio;
+	struct acpi_resource_uart_serialbus *sb;
 
-	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
-		struct acpi_resource_uart_serialbus *sb;
+	switch (ares->type) {
+	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+		irq = &ares->data.extended_irq;
+		dev->irq_polarity = irq->polarity;
+		break;
 
+	case ACPI_RESOURCE_TYPE_GPIO:
+		gpio = &ares->data.gpio;
+		if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT)
+			dev->irq_polarity = gpio->polarity;
+		break;
+
+	case ACPI_RESOURCE_TYPE_SERIAL_BUS:
 		sb = &ares->data.uart_serial_bus;
 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART)
 			dev->init_speed = sb->default_baud_rate;
+		break;
+
+	default:
+		break;
 	}
 
 	/* Always tell the ACPI core to skip this resource */
@@ -424,15 +682,10 @@
 static int bcm_acpi_probe(struct bcm_device *dev)
 {
 	struct platform_device *pdev = dev->pdev;
-	const struct acpi_device_id *id;
-	struct acpi_device *adev;
 	LIST_HEAD(resources);
+	const struct dmi_system_id *dmi_id;
 	int ret;
 
-	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
-	if (!id)
-		return -ENODEV;
-
 	/* Retrieve GPIO data */
 	dev->name = dev_name(&pdev->dev);
 	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
@@ -453,6 +706,21 @@
 	if (IS_ERR(dev->shutdown))
 		return PTR_ERR(dev->shutdown);
 
+	/* The IRQ can be declared in the ACPI table as Interrupt or GpioInt */
+	dev->irq = platform_get_irq(pdev, 0);
+	if (dev->irq <= 0) {
+		struct gpio_desc *gpio;
+
+		gpio = devm_gpiod_get_optional(&pdev->dev, "host-wakeup",
+					       GPIOD_IN);
+		if (IS_ERR(gpio))
+			return PTR_ERR(gpio);
+
+		dev->irq = gpiod_to_irq(gpio);
+	}
+
+	dev_info(&pdev->dev, "BCM irq: %d\n", dev->irq);
+
 	/* Make sure at-least one of the GPIO is defined and that
 	 * a name is specified for this instance
 	 */
@@ -462,11 +730,18 @@
 	}
 
 	/* Retrieve UART ACPI info */
-	adev = ACPI_COMPANION(&dev->pdev->dev);
-	if (!adev)
-		return 0;
+	ret = acpi_dev_get_resources(ACPI_COMPANION(&dev->pdev->dev),
+				     &resources, bcm_resource, dev);
+	if (ret < 0)
+		return ret;
+	acpi_dev_free_resource_list(&resources);
 
-	acpi_dev_get_resources(adev, &resources, bcm_resource, dev);
+	dmi_id = dmi_first_match(bcm_wrong_irq_dmi_table);
+	if (dmi_id) {
+		bt_dev_warn(dev, "%s: Overwriting IRQ polarity to active low",
+			    dmi_id->ident);
+		dev->irq_polarity = *(u8 *)dmi_id->driver_data;
+	}
 
 	return 0;
 }
@@ -480,7 +755,6 @@
 static int bcm_probe(struct platform_device *pdev)
 {
 	struct bcm_device *dev;
-	struct acpi_device_id *pdata = pdev->dev.platform_data;
 	int ret;
 
 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
@@ -489,24 +763,18 @@
 
 	dev->pdev = pdev;
 
-	if (ACPI_HANDLE(&pdev->dev)) {
-		ret = bcm_acpi_probe(dev);
-		if (ret)
-			return ret;
-	} else if (pdata) {
-		dev->name = pdata->id;
-	} else {
-		return -ENODEV;
-	}
+	ret = bcm_acpi_probe(dev);
+	if (ret)
+		return ret;
 
 	platform_set_drvdata(pdev, dev);
 
 	dev_info(&pdev->dev, "%s device registered.\n", dev->name);
 
 	/* Place this instance on the device list */
-	spin_lock(&bcm_device_lock);
+	mutex_lock(&bcm_device_lock);
 	list_add_tail(&dev->list, &bcm_device_list);
-	spin_unlock(&bcm_device_lock);
+	mutex_unlock(&bcm_device_lock);
 
 	bcm_gpio_set_power(dev, false);
 
@@ -517,9 +785,9 @@
 {
 	struct bcm_device *dev = platform_get_drvdata(pdev);
 
-	spin_lock(&bcm_device_lock);
+	mutex_lock(&bcm_device_lock);
 	list_del(&dev->list);
-	spin_unlock(&bcm_device_lock);
+	mutex_unlock(&bcm_device_lock);
 
 	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
 
@@ -553,7 +821,10 @@
 #endif
 
 /* Platform suspend and resume callbacks */
-static SIMPLE_DEV_PM_OPS(bcm_pm_ops, bcm_suspend, bcm_resume);
+static const struct dev_pm_ops bcm_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(bcm_suspend, bcm_resume)
+	SET_RUNTIME_PM_OPS(bcm_suspend_device, bcm_resume_device, NULL)
+};
 
 static struct platform_driver bcm_driver = {
 	.probe = bcm_probe,
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index eec3f28..a6fce48 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -266,3 +266,4 @@
 
 	return skb;
 }
+EXPORT_SYMBOL_GPL(h4_recv_buf);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index b35b238..abee221 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -128,7 +128,7 @@
 {
 	const unsigned char sync_req[] = { 0x01, 0x7e };
 	unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
-	struct hci_uart *hu = (struct hci_uart *) arg;
+	struct hci_uart *hu = (struct hci_uart *)arg;
 	struct h5 *h5 = hu->priv;
 	struct sk_buff *skb;
 	unsigned long flags;
@@ -210,7 +210,7 @@
 
 	init_timer(&h5->timer);
 	h5->timer.function = h5_timed_event;
-	h5->timer.data = (unsigned long) hu;
+	h5->timer.data = (unsigned long)hu;
 
 	h5->tx_win = H5_TX_WIN_MAX;
 
@@ -453,7 +453,7 @@
 		return -ENOMEM;
 	}
 
-	h5->rx_skb->dev = (void *) hu->hdev;
+	h5->rx_skb->dev = (void *)hu->hdev;
 
 	return 0;
 }
@@ -696,7 +696,7 @@
 	}
 
 	skb = skb_dequeue(&h5->unrel);
-	if (skb != NULL) {
+	if (skb) {
 		nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
 				      skb->data, skb->len);
 		if (nskb) {
@@ -714,7 +714,7 @@
 		goto unlock;
 
 	skb = skb_dequeue(&h5->rel);
-	if (skb != NULL) {
+	if (skb) {
 		nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
 				      skb->data, skb->len);
 		if (nskb) {
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index cf07d11..2952107 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -31,6 +31,8 @@
 #include <linux/platform_device.h>
 #include <linux/gpio/consumer.h>
 #include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -43,19 +45,45 @@
 #define STATE_FIRMWARE_LOADED	2
 #define STATE_FIRMWARE_FAILED	3
 #define STATE_BOOTING		4
+#define STATE_LPM_ENABLED	5
+#define STATE_TX_ACTIVE		6
+#define STATE_SUSPENDED		7
+#define STATE_LPM_TRANSACTION	8
+
+#define HCI_LPM_WAKE_PKT 0xf0
+#define HCI_LPM_PKT 0xf1
+#define HCI_LPM_MAX_SIZE 10
+#define HCI_LPM_HDR_SIZE HCI_EVENT_HDR_SIZE
+
+#define LPM_OP_TX_NOTIFY 0x00
+#define LPM_OP_SUSPEND_ACK 0x02
+#define LPM_OP_RESUME_ACK 0x03
+
+#define LPM_SUSPEND_DELAY_MS 1000
+
+struct hci_lpm_pkt {
+	__u8 opcode;
+	__u8 dlen;
+	__u8 data[0];
+} __packed;
 
 struct intel_device {
 	struct list_head list;
 	struct platform_device *pdev;
 	struct gpio_desc *reset;
+	struct hci_uart *hu;
+	struct mutex hu_lock;
+	int irq;
 };
 
 static LIST_HEAD(intel_device_list);
-static DEFINE_SPINLOCK(intel_device_list_lock);
+static DEFINE_MUTEX(intel_device_list_lock);
 
 struct intel_data {
 	struct sk_buff *rx_skb;
 	struct sk_buff_head txq;
+	struct work_struct busy_work;
+	struct hci_uart *hu;
 	unsigned long flags;
 };
 
@@ -101,24 +129,185 @@
 				  msecs_to_jiffies(1000));
 
 	if (err == 1) {
-		BT_ERR("%s: Device boot interrupted", hu->hdev->name);
+		bt_dev_err(hu->hdev, "Device boot interrupted");
 		return -EINTR;
 	}
 
 	if (err) {
-		BT_ERR("%s: Device boot timeout", hu->hdev->name);
+		bt_dev_err(hu->hdev, "Device boot timeout");
 		return -ETIMEDOUT;
 	}
 
 	return err;
 }
 
+#ifdef CONFIG_PM
+static int intel_wait_lpm_transaction(struct hci_uart *hu)
+{
+	struct intel_data *intel = hu->priv;
+	int err;
+
+	err = wait_on_bit_timeout(&intel->flags, STATE_LPM_TRANSACTION,
+				  TASK_INTERRUPTIBLE,
+				  msecs_to_jiffies(1000));
+
+	if (err == 1) {
+		bt_dev_err(hu->hdev, "LPM transaction interrupted");
+		return -EINTR;
+	}
+
+	if (err) {
+		bt_dev_err(hu->hdev, "LPM transaction timeout");
+		return -ETIMEDOUT;
+	}
+
+	return err;
+}
+
+static int intel_lpm_suspend(struct hci_uart *hu)
+{
+	static const u8 suspend[] = { 0x01, 0x01, 0x01 };
+	struct intel_data *intel = hu->priv;
+	struct sk_buff *skb;
+
+	if (!test_bit(STATE_LPM_ENABLED, &intel->flags) ||
+	    test_bit(STATE_SUSPENDED, &intel->flags))
+		return 0;
+
+	if (test_bit(STATE_TX_ACTIVE, &intel->flags))
+		return -EAGAIN;
+
+	bt_dev_dbg(hu->hdev, "Suspending");
+
+	skb = bt_skb_alloc(sizeof(suspend), GFP_KERNEL);
+	if (!skb) {
+		bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
+		return -ENOMEM;
+	}
+
+	memcpy(skb_put(skb, sizeof(suspend)), suspend, sizeof(suspend));
+	bt_cb(skb)->pkt_type = HCI_LPM_PKT;
+
+	set_bit(STATE_LPM_TRANSACTION, &intel->flags);
+
+	/* LPM flow is a priority, enqueue packet at list head */
+	skb_queue_head(&intel->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	intel_wait_lpm_transaction(hu);
+	/* Even in case of failure, continue and test the suspended flag */
+
+	clear_bit(STATE_LPM_TRANSACTION, &intel->flags);
+
+	if (!test_bit(STATE_SUSPENDED, &intel->flags)) {
+		bt_dev_err(hu->hdev, "Device suspend error");
+		return -EINVAL;
+	}
+
+	bt_dev_dbg(hu->hdev, "Suspended");
+
+	hci_uart_set_flow_control(hu, true);
+
+	return 0;
+}
+
+static int intel_lpm_resume(struct hci_uart *hu)
+{
+	struct intel_data *intel = hu->priv;
+	struct sk_buff *skb;
+
+	if (!test_bit(STATE_LPM_ENABLED, &intel->flags) ||
+	    !test_bit(STATE_SUSPENDED, &intel->flags))
+		return 0;
+
+	bt_dev_dbg(hu->hdev, "Resuming");
+
+	hci_uart_set_flow_control(hu, false);
+
+	skb = bt_skb_alloc(0, GFP_KERNEL);
+	if (!skb) {
+		bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
+		return -ENOMEM;
+	}
+
+	bt_cb(skb)->pkt_type = HCI_LPM_WAKE_PKT;
+
+	set_bit(STATE_LPM_TRANSACTION, &intel->flags);
+
+	/* LPM flow is a priority, enqueue packet at list head */
+	skb_queue_head(&intel->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	intel_wait_lpm_transaction(hu);
+	/* Even in case of failure, continue and test the suspended flag */
+
+	clear_bit(STATE_LPM_TRANSACTION, &intel->flags);
+
+	if (test_bit(STATE_SUSPENDED, &intel->flags)) {
+		bt_dev_err(hu->hdev, "Device resume error");
+		return -EINVAL;
+	}
+
+	bt_dev_dbg(hu->hdev, "Resumed");
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static int intel_lpm_host_wake(struct hci_uart *hu)
+{
+	static const u8 lpm_resume_ack[] = { LPM_OP_RESUME_ACK, 0x00 };
+	struct intel_data *intel = hu->priv;
+	struct sk_buff *skb;
+
+	hci_uart_set_flow_control(hu, false);
+
+	clear_bit(STATE_SUSPENDED, &intel->flags);
+
+	skb = bt_skb_alloc(sizeof(lpm_resume_ack), GFP_KERNEL);
+	if (!skb) {
+		bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
+		return -ENOMEM;
+	}
+
+	memcpy(skb_put(skb, sizeof(lpm_resume_ack)), lpm_resume_ack,
+	       sizeof(lpm_resume_ack));
+	bt_cb(skb)->pkt_type = HCI_LPM_PKT;
+
+	/* LPM flow is a priority, enqueue packet at list head */
+	skb_queue_head(&intel->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	bt_dev_dbg(hu->hdev, "Resumed by controller");
+
+	return 0;
+}
+
+static irqreturn_t intel_irq(int irq, void *dev_id)
+{
+	struct intel_device *idev = dev_id;
+
+	dev_info(&idev->pdev->dev, "hci_intel irq\n");
+
+	mutex_lock(&idev->hu_lock);
+	if (idev->hu)
+		intel_lpm_host_wake(idev->hu);
+	mutex_unlock(&idev->hu_lock);
+
+	/* Host/Controller are now LPM resumed, trigger a new delayed suspend */
+	pm_runtime_get(&idev->pdev->dev);
+	pm_runtime_mark_last_busy(&idev->pdev->dev);
+	pm_runtime_put_autosuspend(&idev->pdev->dev);
+
+	return IRQ_HANDLED;
+}
+
 static int intel_set_power(struct hci_uart *hu, bool powered)
 {
 	struct list_head *p;
 	int err = -ENODEV;
 
-	spin_lock(&intel_device_list_lock);
+	mutex_lock(&intel_device_list_lock);
 
 	list_for_each(p, &intel_device_list) {
 		struct intel_device *idev = list_entry(p, struct intel_device,
@@ -139,13 +328,73 @@
 			hu, dev_name(&idev->pdev->dev), powered);
 
 		gpiod_set_value(idev->reset, powered);
+
+		/* Provide idev with a hu reference which is used to run LPM
+		 * transactions (LPM suspend/resume) from the PM callbacks.
+		 * hu needs to be protected against concurrent removal during
+		 * these PM ops.
+		 */
+		mutex_lock(&idev->hu_lock);
+		idev->hu = powered ? hu : NULL;
+		mutex_unlock(&idev->hu_lock);
+
+		if (idev->irq < 0)
+			break;
+
+		if (powered && device_can_wakeup(&idev->pdev->dev)) {
+			err = devm_request_threaded_irq(&idev->pdev->dev,
+							idev->irq, NULL,
+							intel_irq,
+							IRQF_ONESHOT,
+							"bt-host-wake", idev);
+			if (err) {
+				BT_ERR("hu %p, unable to allocate irq-%d",
+				       hu, idev->irq);
+				break;
+			}
+
+			device_wakeup_enable(&idev->pdev->dev);
+
+			pm_runtime_set_active(&idev->pdev->dev);
+			pm_runtime_use_autosuspend(&idev->pdev->dev);
+			pm_runtime_set_autosuspend_delay(&idev->pdev->dev,
+							 LPM_SUSPEND_DELAY_MS);
+			pm_runtime_enable(&idev->pdev->dev);
+		} else if (!powered && device_may_wakeup(&idev->pdev->dev)) {
+			devm_free_irq(&idev->pdev->dev, idev->irq, idev);
+			device_wakeup_disable(&idev->pdev->dev);
+
+			pm_runtime_disable(&idev->pdev->dev);
+		}
 	}
 
-	spin_unlock(&intel_device_list_lock);
+	mutex_unlock(&intel_device_list_lock);
 
 	return err;
 }
 
+static void intel_busy_work(struct work_struct *work)
+{
+	struct list_head *p;
+	struct intel_data *intel = container_of(work, struct intel_data,
+						busy_work);
+
+	/* Link is busy, delay the suspend */
+	mutex_lock(&intel_device_list_lock);
+	list_for_each(p, &intel_device_list) {
+		struct intel_device *idev = list_entry(p, struct intel_device,
+						       list);
+
+		if (intel->hu->tty->dev->parent == idev->pdev->dev.parent) {
+			pm_runtime_get(&idev->pdev->dev);
+			pm_runtime_mark_last_busy(&idev->pdev->dev);
+			pm_runtime_put_autosuspend(&idev->pdev->dev);
+			break;
+		}
+	}
+	mutex_unlock(&intel_device_list_lock);
+}
+
 static int intel_open(struct hci_uart *hu)
 {
 	struct intel_data *intel;
@@ -157,6 +406,9 @@
 		return -ENOMEM;
 
 	skb_queue_head_init(&intel->txq);
+	INIT_WORK(&intel->busy_work, intel_busy_work);
+
+	intel->hu = hu;
 
 	hu->priv = intel;
 
@@ -172,6 +424,8 @@
 
 	BT_DBG("hu %p", hu);
 
+	cancel_work_sync(&intel->busy_work);
+
 	intel_set_power(hu, false);
 
 	skb_queue_purge(&intel->txq);
@@ -237,11 +491,11 @@
 	if (err && err != ETIMEDOUT)
 		return err;
 
-	BT_INFO("%s: Change controller speed to %d", hdev->name, speed);
+	bt_dev_info(hdev, "Change controller speed to %d", speed);
 
 	speed_cmd[3] = intel_convert_speed(speed);
 	if (speed_cmd[3] == 0xff) {
-		BT_ERR("%s: Unsupported speed", hdev->name);
+		bt_dev_err(hdev, "Unsupported speed");
 		return -EINVAL;
 	}
 
@@ -250,16 +504,15 @@
 	 */
 	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
 	if (IS_ERR(skb)) {
-		BT_ERR("%s: Reading Intel version information failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
+		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
+			   PTR_ERR(skb));
 		return PTR_ERR(skb);
 	}
 	kfree_skb(skb);
 
 	skb = bt_skb_alloc(sizeof(speed_cmd), GFP_KERNEL);
 	if (!skb) {
-		BT_ERR("%s: Failed to allocate memory for baudrate packet",
-		       hdev->name);
+		bt_dev_err(hdev, "Failed to alloc memory for baudrate packet");
 		return -ENOMEM;
 	}
 
@@ -284,11 +537,14 @@
 {
 	static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
 					  0x00, 0x08, 0x04, 0x00 };
+	static const u8 lpm_param[] = { 0x03, 0x07, 0x01, 0x0b };
 	struct intel_data *intel = hu->priv;
+	struct intel_device *idev = NULL;
 	struct hci_dev *hdev = hu->hdev;
 	struct sk_buff *skb;
 	struct intel_version *ver;
 	struct intel_boot_params *params;
+	struct list_head *p;
 	const struct firmware *fw;
 	const u8 *fw_ptr;
 	char fwname[64];
@@ -299,7 +555,7 @@
 	int speed_change = 0;
 	int err;
 
-	BT_DBG("%s", hdev->name);
+	bt_dev_dbg(hdev, "start intel_setup");
 
 	hu->hdev->set_bdaddr = btintel_set_bdaddr;
 
@@ -335,21 +591,21 @@
 	 */
 	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
 	if (IS_ERR(skb)) {
-		BT_ERR("%s: Reading Intel version information failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
+		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
+			   PTR_ERR(skb));
 		return PTR_ERR(skb);
 	}
 
 	if (skb->len != sizeof(*ver)) {
-		BT_ERR("%s: Intel version event size mismatch", hdev->name);
+		bt_dev_err(hdev, "Intel version event size mismatch");
 		kfree_skb(skb);
 		return -EILSEQ;
 	}
 
 	ver = (struct intel_version *)skb->data;
 	if (ver->status) {
-		BT_ERR("%s: Intel version command failure (%02x)",
-		       hdev->name, ver->status);
+		bt_dev_err(hdev, "Intel version command failure (%02x)",
+			   ver->status);
 		err = -bt_to_errno(ver->status);
 		kfree_skb(skb);
 		return err;
@@ -359,8 +615,8 @@
 	 * for now only accept this single value.
 	 */
 	if (ver->hw_platform != 0x37) {
-		BT_ERR("%s: Unsupported Intel hardware platform (%u)",
-		       hdev->name, ver->hw_platform);
+		bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
+			   ver->hw_platform);
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -371,8 +627,8 @@
 	 * when newer hardware variants come along.
 	 */
 	if (ver->hw_variant != 0x0b) {
-		BT_ERR("%s: Unsupported Intel hardware variant (%u)",
-		       hdev->name, ver->hw_variant);
+		bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
+			   ver->hw_variant);
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -403,8 +659,8 @@
 	 * choice is to return an error and abort the device initialization.
 	 */
 	if (ver->fw_variant != 0x06) {
-		BT_ERR("%s: Unsupported Intel firmware variant (%u)",
-		       hdev->name, ver->fw_variant);
+		bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)",
+			   ver->fw_variant);
 		kfree_skb(skb);
 		return -ENODEV;
 	}
@@ -416,33 +672,33 @@
 	 */
 	skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
 	if (IS_ERR(skb)) {
-		BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
+		bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
+			   PTR_ERR(skb));
 		return PTR_ERR(skb);
 	}
 
 	if (skb->len != sizeof(*params)) {
-		BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
+		bt_dev_err(hdev, "Intel boot parameters size mismatch");
 		kfree_skb(skb);
 		return -EILSEQ;
 	}
 
 	params = (struct intel_boot_params *)skb->data;
 	if (params->status) {
-		BT_ERR("%s: Intel boot parameters command failure (%02x)",
-		       hdev->name, params->status);
+		bt_dev_err(hdev, "Intel boot parameters command failure (%02x)",
+			   params->status);
 		err = -bt_to_errno(params->status);
 		kfree_skb(skb);
 		return err;
 	}
 
-	BT_INFO("%s: Device revision is %u", hdev->name,
-		le16_to_cpu(params->dev_revid));
+	bt_dev_info(hdev, "Device revision is %u",
+		    le16_to_cpu(params->dev_revid));
 
-	BT_INFO("%s: Secure boot is %s", hdev->name,
-		params->secure_boot ? "enabled" : "disabled");
+	bt_dev_info(hdev, "Secure boot is %s",
+		    params->secure_boot ? "enabled" : "disabled");
 
-	BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
+	bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
 		params->min_fw_build_nn, params->min_fw_build_cw,
 		2000 + params->min_fw_build_yy);
 
@@ -451,8 +707,8 @@
 	 * that this bootloader does not send them, then abort the setup.
 	 */
 	if (params->limited_cce != 0x00) {
-		BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
-		       hdev->name, params->limited_cce);
+		bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
+			   params->limited_cce);
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -461,7 +717,7 @@
 	 * also be no valid address for the operational firmware.
 	 */
 	if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
-		BT_INFO("%s: No device address configured", hdev->name);
+		bt_dev_info(hdev, "No device address configured");
 		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
 	}
 
@@ -476,19 +732,23 @@
 
 	err = request_firmware(&fw, fwname, &hdev->dev);
 	if (err < 0) {
-		BT_ERR("%s: Failed to load Intel firmware file (%d)",
-		       hdev->name, err);
+		bt_dev_err(hdev, "Failed to load Intel firmware file (%d)",
+			   err);
 		kfree_skb(skb);
 		return err;
 	}
 
-	BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
+	bt_dev_info(hdev, "Found device firmware: %s", fwname);
+
+	/* Save the DDC file name for later */
+	snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.ddc",
+		 le16_to_cpu(params->dev_revid));
 
 	kfree_skb(skb);
 
 	if (fw->size < 644) {
-		BT_ERR("%s: Invalid size of firmware file (%zu)",
-		       hdev->name, fw->size);
+		bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
+			   fw->size);
 		err = -EBADF;
 		goto done;
 	}
@@ -500,8 +760,7 @@
 	 */
 	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
 	if (err < 0) {
-		BT_ERR("%s: Failed to send firmware header (%d)",
-		       hdev->name, err);
+		bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
 		goto done;
 	}
 
@@ -510,8 +769,8 @@
 	 */
 	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
 	if (err < 0) {
-		BT_ERR("%s: Failed to send firmware public key (%d)",
-		       hdev->name, err);
+		bt_dev_err(hdev, "Failed to send firmware public key (%d)",
+			   err);
 		goto done;
 	}
 
@@ -520,8 +779,8 @@
 	 */
 	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
 	if (err < 0) {
-		BT_ERR("%s: Failed to send firmware signature (%d)",
-		       hdev->name, err);
+		bt_dev_err(hdev, "Failed to send firmware signature (%d)",
+			   err);
 		goto done;
 	}
 
@@ -533,8 +792,8 @@
 
 		frag_len += sizeof(*cmd) + cmd->plen;
 
-		BT_DBG("%s: patching %td/%zu", hdev->name,
-		       (fw_ptr - fw->data), fw->size);
+		bt_dev_dbg(hdev, "Patching %td/%zu", (fw_ptr - fw->data),
+			   fw->size);
 
 		/* The parameter length of the secure send command requires
 		 * a 4 byte alignment. It happens so that the firmware file
@@ -552,8 +811,8 @@
 		 */
 		err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
 		if (err < 0) {
-			BT_ERR("%s: Failed to send firmware data (%d)",
-			       hdev->name, err);
+			bt_dev_err(hdev, "Failed to send firmware data (%d)",
+				   err);
 			goto done;
 		}
 
@@ -563,7 +822,7 @@
 
 	set_bit(STATE_FIRMWARE_LOADED, &intel->flags);
 
-	BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
+	bt_dev_info(hdev, "Waiting for firmware download to complete");
 
 	/* Before switching the device into operational mode and with that
 	 * booting the loaded firmware, wait for the bootloader notification
@@ -580,19 +839,19 @@
 				  TASK_INTERRUPTIBLE,
 				  msecs_to_jiffies(5000));
 	if (err == 1) {
-		BT_ERR("%s: Firmware loading interrupted", hdev->name);
+		bt_dev_err(hdev, "Firmware loading interrupted");
 		err = -EINTR;
 		goto done;
 	}
 
 	if (err) {
-		BT_ERR("%s: Firmware loading timeout", hdev->name);
+		bt_dev_err(hdev, "Firmware loading timeout");
 		err = -ETIMEDOUT;
 		goto done;
 	}
 
 	if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) {
-		BT_ERR("%s: Firmware loading failed", hdev->name);
+		bt_dev_err(hdev, "Firmware loading failed");
 		err = -ENOEXEC;
 		goto done;
 	}
@@ -601,7 +860,7 @@
 	delta = ktime_sub(rettime, calltime);
 	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
 
-	BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration);
+	bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration);
 
 done:
 	release_firmware(fw);
@@ -634,7 +893,7 @@
 	 * 1 second. However if that happens, then just fail the setup
 	 * since something went wrong.
 	 */
-	BT_INFO("%s: Waiting for device to boot", hdev->name);
+	bt_dev_info(hdev, "Waiting for device to boot");
 
 	err = intel_wait_booting(hu);
 	if (err)
@@ -646,7 +905,39 @@
 	delta = ktime_sub(rettime, calltime);
 	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
 
-	BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration);
+	bt_dev_info(hdev, "Device booted in %llu usecs", duration);
+
+	/* Enable LPM if matching pdev with wakeup enabled */
+	mutex_lock(&intel_device_list_lock);
+	list_for_each(p, &intel_device_list) {
+		struct intel_device *dev = list_entry(p, struct intel_device,
+						      list);
+		if (hu->tty->dev->parent == dev->pdev->dev.parent) {
+			if (device_may_wakeup(&dev->pdev->dev))
+				idev = dev;
+			break;
+		}
+	}
+	mutex_unlock(&intel_device_list_lock);
+
+	if (!idev)
+		goto no_lpm;
+
+	bt_dev_info(hdev, "Enabling LPM");
+
+	skb = __hci_cmd_sync(hdev, 0xfc8b, sizeof(lpm_param), lpm_param,
+			     HCI_CMD_TIMEOUT);
+	if (IS_ERR(skb)) {
+		bt_dev_err(hdev, "Failed to enable LPM");
+		goto no_lpm;
+	}
+	kfree_skb(skb);
+
+	set_bit(STATE_LPM_ENABLED, &intel->flags);
+
+no_lpm:
+	/* Ignore errors, device can work without DDC parameters */
+	btintel_load_ddc_config(hdev, fwname);
 
 	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 	if (IS_ERR(skb))
@@ -659,7 +950,7 @@
 			return err;
 	}
 
-	BT_INFO("%s: Setup complete", hdev->name);
+	bt_dev_info(hdev, "Setup complete");
 
 	clear_bit(STATE_BOOTLOADER, &intel->flags);
 
@@ -708,10 +999,71 @@
 	return hci_recv_frame(hdev, skb);
 }
 
+static void intel_recv_lpm_notify(struct hci_dev *hdev, int value)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct intel_data *intel = hu->priv;
+
+	bt_dev_dbg(hdev, "TX idle notification (%d)", value);
+
+	if (value) {
+		set_bit(STATE_TX_ACTIVE, &intel->flags);
+		schedule_work(&intel->busy_work);
+	} else {
+		clear_bit(STATE_TX_ACTIVE, &intel->flags);
+	}
+}
+
+static int intel_recv_lpm(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_lpm_pkt *lpm = (void *)skb->data;
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct intel_data *intel = hu->priv;
+
+	switch (lpm->opcode) {
+	case LPM_OP_TX_NOTIFY:
+		if (lpm->dlen < 1) {
+			bt_dev_err(hu->hdev, "Invalid LPM notification packet");
+			break;
+		}
+		intel_recv_lpm_notify(hdev, lpm->data[0]);
+		break;
+	case LPM_OP_SUSPEND_ACK:
+		set_bit(STATE_SUSPENDED, &intel->flags);
+		if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) {
+			smp_mb__after_atomic();
+			wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
+		}
+		break;
+	case LPM_OP_RESUME_ACK:
+		clear_bit(STATE_SUSPENDED, &intel->flags);
+		if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) {
+			smp_mb__after_atomic();
+			wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
+		}
+		break;
+	default:
+		bt_dev_err(hdev, "Unknown LPM opcode (%02x)", lpm->opcode);
+		break;
+	}
+
+	kfree_skb(skb);
+
+	return 0;
+}
+
+#define INTEL_RECV_LPM \
+	.type = HCI_LPM_PKT, \
+	.hlen = HCI_LPM_HDR_SIZE, \
+	.loff = 1, \
+	.lsize = 1, \
+	.maxlen = HCI_LPM_MAX_SIZE
+
 static const struct h4_recv_pkt intel_recv_pkts[] = {
-	{ H4_RECV_ACL,   .recv = hci_recv_frame },
-	{ H4_RECV_SCO,   .recv = hci_recv_frame },
-	{ H4_RECV_EVENT, .recv = intel_recv_event },
+	{ H4_RECV_ACL,    .recv = hci_recv_frame   },
+	{ H4_RECV_SCO,    .recv = hci_recv_frame   },
+	{ H4_RECV_EVENT,  .recv = intel_recv_event },
+	{ INTEL_RECV_LPM, .recv = intel_recv_lpm   },
 };
 
 static int intel_recv(struct hci_uart *hu, const void *data, int count)
@@ -726,7 +1078,7 @@
 				    ARRAY_SIZE(intel_recv_pkts));
 	if (IS_ERR(intel->rx_skb)) {
 		int err = PTR_ERR(intel->rx_skb);
-		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
 		intel->rx_skb = NULL;
 		return err;
 	}
@@ -737,9 +1089,27 @@
 static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 {
 	struct intel_data *intel = hu->priv;
+	struct list_head *p;
 
 	BT_DBG("hu %p skb %p", hu, skb);
 
+	/* Make sure the controller is resumed and any pending LPM transaction
+	 * has completed before enqueuing any packet.
+	 */
+	mutex_lock(&intel_device_list_lock);
+	list_for_each(p, &intel_device_list) {
+		struct intel_device *idev = list_entry(p, struct intel_device,
+						       list);
+
+		if (hu->tty->dev->parent == idev->pdev->dev.parent) {
+			pm_runtime_get_sync(&idev->pdev->dev);
+			pm_runtime_mark_last_busy(&idev->pdev->dev);
+			pm_runtime_put_autosuspend(&idev->pdev->dev);
+			break;
+		}
+	}
+	mutex_unlock(&intel_device_list_lock);
+
 	skb_queue_tail(&intel->txq, skb);
 
 	return 0;
@@ -795,24 +1165,61 @@
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, intel_acpi_match);
+#endif
 
-static int intel_acpi_probe(struct intel_device *idev)
+#ifdef CONFIG_PM
+static int intel_suspend_device(struct device *dev)
 {
-	const struct acpi_device_id *id;
+	struct intel_device *idev = dev_get_drvdata(dev);
 
-	id = acpi_match_device(intel_acpi_match, &idev->pdev->dev);
-	if (!id)
-		return -ENODEV;
+	mutex_lock(&idev->hu_lock);
+	if (idev->hu)
+		intel_lpm_suspend(idev->hu);
+	mutex_unlock(&idev->hu_lock);
 
 	return 0;
 }
-#else
-static int intel_acpi_probe(struct intel_device *idev)
+
+static int intel_resume_device(struct device *dev)
 {
-	return -ENODEV;
+	struct intel_device *idev = dev_get_drvdata(dev);
+
+	mutex_lock(&idev->hu_lock);
+	if (idev->hu)
+		intel_lpm_resume(idev->hu);
+	mutex_unlock(&idev->hu_lock);
+
+	return 0;
 }
 #endif
 
+#ifdef CONFIG_PM_SLEEP
+static int intel_suspend(struct device *dev)
+{
+	struct intel_device *idev = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev))
+		enable_irq_wake(idev->irq);
+
+	return intel_suspend_device(dev);
+}
+
+static int intel_resume(struct device *dev)
+{
+	struct intel_device *idev = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev))
+		disable_irq_wake(idev->irq);
+
+	return intel_resume_device(dev);
+}
+#endif
+
+static const struct dev_pm_ops intel_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
+	SET_RUNTIME_PM_OPS(intel_suspend_device, intel_resume_device, NULL)
+};
+
 static int intel_probe(struct platform_device *pdev)
 {
 	struct intel_device *idev;
@@ -821,15 +1228,9 @@
 	if (!idev)
 		return -ENOMEM;
 
-	idev->pdev = pdev;
+	mutex_init(&idev->hu_lock);
 
-	if (ACPI_HANDLE(&pdev->dev)) {
-		int err = intel_acpi_probe(idev);
-		if (err)
-			return err;
-	} else {
-		return -ENODEV;
-	}
+	idev->pdev = pdev;
 
 	idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset",
 					      GPIOD_OUT_LOW);
@@ -838,14 +1239,40 @@
 		return PTR_ERR(idev->reset);
 	}
 
+	idev->irq = platform_get_irq(pdev, 0);
+	if (idev->irq < 0) {
+		struct gpio_desc *host_wake;
+
+		dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n");
+
+		host_wake = devm_gpiod_get_optional(&pdev->dev, "host-wake",
+						    GPIOD_IN);
+		if (IS_ERR(host_wake)) {
+			dev_err(&pdev->dev, "Unable to retrieve IRQ\n");
+			goto no_irq;
+		}
+
+		idev->irq = gpiod_to_irq(host_wake);
+		if (idev->irq < 0) {
+			dev_err(&pdev->dev, "No corresponding irq for gpio\n");
+			goto no_irq;
+		}
+	}
+
+	/* Only enable wake-up/irq when controller is powered */
+	device_set_wakeup_capable(&pdev->dev, true);
+	device_wakeup_disable(&pdev->dev);
+
+no_irq:
 	platform_set_drvdata(pdev, idev);
 
 	/* Place this instance on the device list */
-	spin_lock(&intel_device_list_lock);
+	mutex_lock(&intel_device_list_lock);
 	list_add_tail(&idev->list, &intel_device_list);
-	spin_unlock(&intel_device_list_lock);
+	mutex_unlock(&intel_device_list_lock);
 
-	dev_info(&pdev->dev, "registered.\n");
+	dev_info(&pdev->dev, "registered, gpio(%d)/irq(%d).\n",
+		 desc_to_gpio(idev->reset), idev->irq);
 
 	return 0;
 }
@@ -854,9 +1281,11 @@
 {
 	struct intel_device *idev = platform_get_drvdata(pdev);
 
-	spin_lock(&intel_device_list_lock);
+	device_wakeup_disable(&pdev->dev);
+
+	mutex_lock(&intel_device_list_lock);
 	list_del(&idev->list);
-	spin_unlock(&intel_device_list_lock);
+	mutex_unlock(&intel_device_list_lock);
 
 	dev_info(&pdev->dev, "unregistered.\n");
 
@@ -869,6 +1298,7 @@
 	.driver = {
 		.name = "hci_intel",
 		.acpi_match_table = ACPI_PTR(intel_acpi_match),
+		.pm = &intel_pm_ops,
 	},
 };
 
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 0d5a05a..01a83a3 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -208,9 +208,6 @@
 	BT_DBG("%s %p", hdev->name, hdev);
 
 	/* Nothing to do for UART driver */
-
-	set_bit(HCI_RUNNING, &hdev->flags);
-
 	return 0;
 }
 
@@ -241,9 +238,6 @@
 {
 	BT_DBG("hdev %p", hdev);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	hci_uart_flush(hdev);
 	hdev->flush = NULL;
 	return 0;
@@ -254,9 +248,6 @@
 {
 	struct hci_uart *hu = hci_get_drvdata(hdev);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
 
 	hu->proto->enqueue(hu, skb);
@@ -470,8 +461,6 @@
 	INIT_WORK(&hu->init_ready, hci_uart_init_work);
 	INIT_WORK(&hu->write_work, hci_uart_write_work);
 
-	spin_lock_init(&hu->rx_lock);
-
 	/* Flush any pending characters in the driver and line discipline. */
 
 	/* FIXME: why is this needed. Note don't use ldisc_ref here as the
@@ -569,14 +558,14 @@
 	if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
 		return;
 
-	spin_lock(&hu->rx_lock);
+	/* No lock is needed here as this path is already serialized by a mutex
+	 * in the tty caller.
+	 */
 	hu->proto->recv(hu, data, count);
 
 	if (hu->hdev)
 		hu->hdev->stat.byte_rx += count;
 
-	spin_unlock(&hu->rx_lock);
-
 	tty_unthrottle(tty);
 }
 
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 6b9b912..b4a0393 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -41,13 +41,13 @@
 #define HCI_IBS_SLEEP_IND	0xFE
 #define HCI_IBS_WAKE_IND	0xFD
 #define HCI_IBS_WAKE_ACK	0xFC
-#define HCI_MAX_IBS_SIZE 	10
+#define HCI_MAX_IBS_SIZE	10
 
 /* Controller states */
 #define STATE_IN_BAND_SLEEP_ENABLED	1
 
-#define IBS_WAKE_RETRANS_TIMEOUT_MS 	100
-#define IBS_TX_IDLE_TIMEOUT_MS 		2000
+#define IBS_WAKE_RETRANS_TIMEOUT_MS	100
+#define IBS_TX_IDLE_TIMEOUT_MS		2000
 #define BAUDRATE_SETTLE_TIMEOUT_MS	300
 
 /* HCI_IBS transmit side sleep protocol states */
@@ -181,8 +181,8 @@
 		else
 			__serial_clock_off(hu->tty);
 
-		BT_DBG("Vote serial clock %s(%s)", new_vote? "true" : "false",
-		       vote? "true" : "false");
+		BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
+		       vote ? "true" : "false");
 
 		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
 
@@ -347,7 +347,7 @@
 	struct hci_uart *hu = (struct hci_uart *)arg;
 	struct qca_data *qca = hu->priv;
 	unsigned long flags, retrans_delay;
-	unsigned long retransmit = 0;
+	bool retransmit = false;
 
 	BT_DBG("hu %p wake retransmit timeout in %d state",
 		hu, qca->tx_ibs_state);
@@ -358,7 +358,7 @@
 	switch (qca->tx_ibs_state) {
 	case HCI_IBS_TX_WAKING:
 		/* No WAKE_ACK, retransmit WAKE */
-		retransmit = 1;
+		retransmit = true;
 		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
 			BT_ERR("Failed to acknowledge device wake up");
 			break;
@@ -821,7 +821,7 @@
 
 static uint8_t qca_get_baudrate_value(int speed)
 {
-	switch(speed) {
+	switch (speed) {
 	case 9600:
 		return QCA_BAUDRATE_9600;
 	case 19200:
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 495b9ef..2f7bb35 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -85,7 +85,6 @@
 
 	struct sk_buff		*tx_skb;
 	unsigned long		tx_state;
-	spinlock_t		rx_lock;
 
 	unsigned int init_speed;
 	unsigned int oper_speed;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 78653db..ed888e3 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -55,8 +55,6 @@
 
 static int vhci_open_dev(struct hci_dev *hdev)
 {
-	set_bit(HCI_RUNNING, &hdev->flags);
-
 	return 0;
 }
 
@@ -64,9 +62,6 @@
 {
 	struct vhci_data *data = hci_get_drvdata(hdev);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	skb_queue_purge(&data->readq);
 
 	return 0;
@@ -85,9 +80,6 @@
 {
 	struct vhci_data *data = hci_get_drvdata(hdev);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
 	skb_queue_tail(&data->readq, skb);
 
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index c37cf75..3c77645 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -344,11 +344,12 @@
 	if (IS_ERR(ctx->csr_base))
 		return PTR_ERR(ctx->csr_base);
 
-	ctx->irq = platform_get_irq(pdev, 0);
-	if (ctx->irq < 0) {
+	rc = platform_get_irq(pdev, 0);
+	if (rc < 0) {
 		dev_err(&pdev->dev, "No IRQ resource\n");
-		return ctx->irq;
+		return rc;
 	}
+	ctx->irq = rc;
 
 	dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
 		ctx->csr_base, ctx->irq);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 43e2c3a..0ebcf44 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2437,7 +2437,8 @@
 	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
 		if (orphan->num_parents && orphan->ops->get_parent) {
 			i = orphan->ops->get_parent(orphan->hw);
-			if (!strcmp(core->name, orphan->parent_names[i]))
+			if (i >= 0 && i < orphan->num_parents &&
+			    !strcmp(core->name, orphan->parent_names[i]))
 				clk_core_reparent(orphan, core);
 			continue;
 		}
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index 2a38eb4..6cf38dc 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -8,6 +8,7 @@
 #include <linux/err.h>
 #include <linux/device.h>
 #include <linux/of_address.h>
+#include <linux/slab.h>
 
 static DEFINE_SPINLOCK(clklock);
 
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig
index 2c16807..e434854 100644
--- a/drivers/clk/hisilicon/Kconfig
+++ b/drivers/clk/hisilicon/Kconfig
@@ -1,6 +1,12 @@
 config COMMON_CLK_HI6220
 	bool "Hi6220 Clock Driver"
-	depends on (ARCH_HISI || COMPILE_TEST) && MAILBOX
+	depends on ARCH_HISI || COMPILE_TEST
 	default ARCH_HISI
 	help
 	  Build the Hisilicon Hi6220 clock driver based on the common clock framework.
+
+config STUB_CLK_HI6220
+	bool "Hi6220 Stub Clock Driver"
+	depends on COMMON_CLK_HI6220 && MAILBOX
+	help
+	  Build the Hisilicon Hi6220 stub clock driver.
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index 4a1001a..74dba31 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -7,4 +7,5 @@
 obj-$(CONFIG_ARCH_HI3xxx)	+= clk-hi3620.o
 obj-$(CONFIG_ARCH_HIP04)	+= clk-hip04.o
 obj-$(CONFIG_ARCH_HIX5HD2)	+= clk-hix5hd2.o
-obj-$(CONFIG_COMMON_CLK_HI6220)	+= clk-hi6220.o clk-hi6220-stub.o
+obj-$(CONFIG_COMMON_CLK_HI6220)	+= clk-hi6220.o
+obj-$(CONFIG_STUB_CLK_HI6220)	+= clk-hi6220-stub.o
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index ed02bbc..abb4760 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -716,6 +716,8 @@
 	"aclk_cpu",
 	"aclk_peri",
 	"hclk_peri",
+	"pclk_cpu",
+	"pclk_peri",
 };
 
 static void __init rk3188_common_clk_init(struct device_node *np)
@@ -744,8 +746,6 @@
 
 	rockchip_clk_register_branches(common_clk_branches,
 				  ARRAY_SIZE(common_clk_branches));
-	rockchip_clk_protect_critical(rk3188_critical_clocks,
-				      ARRAY_SIZE(rk3188_critical_clocks));
 
 	rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0),
 				  ROCKCHIP_SOFTRST_HIWORD_MASK);
@@ -765,6 +765,8 @@
 			mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
 			&rk3066_cpuclk_data, rk3066_cpuclk_rates,
 			ARRAY_SIZE(rk3066_cpuclk_rates));
+	rockchip_clk_protect_critical(rk3188_critical_clocks,
+				      ARRAY_SIZE(rk3188_critical_clocks));
 }
 CLK_OF_DECLARE(rk3066a_cru, "rockchip,rk3066a-cru", rk3066a_clk_init);
 
@@ -801,6 +803,9 @@
 		pr_warn("%s: missing clocks to reparent aclk_cpu_pre to gpll\n",
 			__func__);
 	}
+
+	rockchip_clk_protect_critical(rk3188_critical_clocks,
+				      ARRAY_SIZE(rk3188_critical_clocks));
 }
 CLK_OF_DECLARE(rk3188a_cru, "rockchip,rk3188a-cru", rk3188a_clk_init);
 
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 9c5d61e..7e6b783 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -818,6 +818,10 @@
 	GATE(0, "sclk_timer00", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 0, GFLAGS),
 };
 
+static const char *const rk3368_critical_clocks[] __initconst = {
+	"pclk_pd_pmu",
+};
+
 static void __init rk3368_clk_init(struct device_node *np)
 {
 	void __iomem *reg_base;
@@ -862,6 +866,8 @@
 				   RK3368_GRF_SOC_STATUS0);
 	rockchip_clk_register_branches(rk3368_clk_branches,
 				  ARRAY_SIZE(rk3368_clk_branches));
+	rockchip_clk_protect_critical(rk3368_critical_clocks,
+				      ARRAY_SIZE(rk3368_critical_clocks));
 
 	rockchip_clk_register_armclk(ARMCLKB, "armclkb",
 			mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p),
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index 83ccf14..576cd03 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -307,7 +307,7 @@
 	.get_rate	= clk_fs660c32_dig_get_rate,
 };
 
-static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
+static const struct clkgen_quadfs_data st_fs660c32_C = {
 	.nrst_present = true,
 	.nrst	= { CLKGEN_FIELD(0x2f0, 0x1, 0),
 		    CLKGEN_FIELD(0x2f0, 0x1, 1),
@@ -350,7 +350,7 @@
 	.get_rate	= clk_fs660c32_dig_get_rate,
 };
 
-static const struct clkgen_quadfs_data st_fs660c32_D_407 = {
+static const struct clkgen_quadfs_data st_fs660c32_D = {
 	.nrst_present = true,
 	.nrst	= { CLKGEN_FIELD(0x2a0, 0x1, 0),
 		    CLKGEN_FIELD(0x2a0, 0x1, 1),
@@ -1077,11 +1077,11 @@
 	},
 	{
 		.compatible = "st,stih407-quadfs660-C",
-		.data = &st_fs660c32_C_407
+		.data = &st_fs660c32_C
 	},
 	{
 		.compatible = "st,stih407-quadfs660-D",
-		.data = &st_fs660c32_D_407
+		.data = &st_fs660c32_D
 	},
 	{}
 };
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index 47a38a9..b2a332c 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -193,7 +193,7 @@
 	.ops		= &stm_pll3200c32_ops,
 };
 
-static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = {
+static const struct clkgen_pll_data st_pll3200c32_cx_0 = {
 	/* 407 C0 PLL0 */
 	.pdn_status	= CLKGEN_FIELD(0x2a0,	0x1,			8),
 	.locked_status	= CLKGEN_FIELD(0x2a0,	0x1,			24),
@@ -205,7 +205,7 @@
 	.ops		= &stm_pll3200c32_ops,
 };
 
-static const struct clkgen_pll_data st_pll3200c32_407_c0_1 = {
+static const struct clkgen_pll_data st_pll3200c32_cx_1 = {
 	/* 407 C0 PLL1 */
 	.pdn_status	= CLKGEN_FIELD(0x2c8,	0x1,			8),
 	.locked_status	= CLKGEN_FIELD(0x2c8,	0x1,			24),
@@ -624,12 +624,12 @@
 		.data = &st_pll3200c32_407_a0,
 	},
 	{
-		.compatible = "st,stih407-plls-c32-c0_0",
-		.data = &st_pll3200c32_407_c0_0,
+		.compatible = "st,plls-c32-cx_0",
+		.data = &st_pll3200c32_cx_0,
 	},
 	{
-		.compatible = "st,stih407-plls-c32-c0_1",
-		.data = &st_pll3200c32_407_c0_1,
+		.compatible = "st,plls-c32-cx_1",
+		.data = &st_pll3200c32_cx_1,
 	},
 	{
 		.compatible = "st,stih407-plls-c32-a9",
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index c2ff859..c4e3a52 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -682,11 +682,17 @@
 	struct dev_pm_opp *opp;
 	int i, uv;
 
+	rcu_read_lock();
+
 	opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
-	if (IS_ERR(opp))
+	if (IS_ERR(opp)) {
+		rcu_read_unlock();
 		return PTR_ERR(opp);
+	}
 	uv = dev_pm_opp_get_voltage(opp);
 
+	rcu_read_unlock();
+
 	for (i = 0; i < td->i2c_lut_size; i++) {
 		if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv)
 			return i;
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 5f498d9..cd0391e 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,6 +84,7 @@
 config ARM_MT8173_CPUFREQ
 	bool "Mediatek MT8173 CPUFreq support"
 	depends on ARCH_MEDIATEK && REGULATOR
+	depends on !CPU_THERMAL || THERMAL=y
 	select PM_OPP
 	help
 	  This adds the CPUFreq driver support for Mediatek MT8173 SoC.
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 15b921a..7982772 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -375,12 +375,11 @@
 
 	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
 
-	policy = cpufreq_cpu_get(cpu);
+	policy = cpufreq_cpu_get_raw(cpu);
 	if (unlikely(!policy))
 		return 0;
 
 	data = policy->driver_data;
-	cpufreq_cpu_put(policy);
 	if (unlikely(!data || !data->freq_table))
 		return 0;
 
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index c3583cd..7c0d70e 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -196,6 +196,7 @@
 	struct device *cpu_dev;
 	struct regulator *cpu_reg;
 	struct clk *cpu_clk;
+	struct dev_pm_opp *suspend_opp;
 	unsigned long min_uV = ~0, max_uV = 0;
 	unsigned int transition_latency;
 	bool need_update = false;
@@ -239,6 +240,17 @@
 	 */
 	of_cpumask_init_opp_table(policy->cpus);
 
+	/*
+	 * But we need OPP table to function so if it is not there let's
+	 * give platform code chance to provide it for us.
+	 */
+	ret = dev_pm_opp_get_opp_count(cpu_dev);
+	if (ret <= 0) {
+		pr_debug("OPP table is not ready, deferring probe\n");
+		ret = -EPROBE_DEFER;
+		goto out_free_opp;
+	}
+
 	if (need_update) {
 		struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
 
@@ -249,24 +261,16 @@
 		 * OPP tables are initialized only for policy->cpu, do it for
 		 * others as well.
 		 */
-		set_cpus_sharing_opps(cpu_dev, policy->cpus);
+		ret = set_cpus_sharing_opps(cpu_dev, policy->cpus);
+		if (ret)
+			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+				__func__, ret);
 
 		of_property_read_u32(np, "clock-latency", &transition_latency);
 	} else {
 		transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
 	}
 
-	/*
-	 * But we need OPP table to function so if it is not there let's
-	 * give platform code chance to provide it for us.
-	 */
-	ret = dev_pm_opp_get_opp_count(cpu_dev);
-	if (ret <= 0) {
-		pr_debug("OPP table is not ready, deferring probe\n");
-		ret = -EPROBE_DEFER;
-		goto out_free_opp;
-	}
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		ret = -ENOMEM;
@@ -300,7 +304,8 @@
 			rcu_read_unlock();
 
 			tol_uV = opp_uV * priv->voltage_tolerance / 100;
-			if (regulator_is_supported_voltage(cpu_reg, opp_uV,
+			if (regulator_is_supported_voltage(cpu_reg,
+							   opp_uV - tol_uV,
 							   opp_uV + tol_uV)) {
 				if (opp_uV < min_uV)
 					min_uV = opp_uV;
@@ -329,6 +334,13 @@
 	policy->driver_data = priv;
 
 	policy->clk = cpu_clk;
+
+	rcu_read_lock();
+	suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
+	if (suspend_opp)
+		policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
+	rcu_read_unlock();
+
 	ret = cpufreq_table_validate_and_show(policy, freq_table);
 	if (ret) {
 		dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
@@ -419,6 +431,7 @@
 	.ready = cpufreq_ready,
 	.name = "cpufreq-dt",
 	.attr = cpufreq_dt_attr,
+	.suspend = cpufreq_generic_suspend,
 };
 
 static int dt_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b3d9368..ef5ed94 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -238,13 +238,13 @@
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
 
-/* Only for cpufreq core internal use */
 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
 	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
 }
+EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
 
 unsigned int cpufreq_generic_get(unsigned int cpu)
 {
@@ -1626,8 +1626,8 @@
 	int ret;
 
 	if (!policy->suspend_freq) {
-		pr_err("%s: suspend_freq can't be zero\n", __func__);
-		return -EINVAL;
+		pr_debug("%s: suspend_freq not defined\n", __func__);
+		return 0;
 	}
 
 	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
@@ -2031,8 +2031,7 @@
 		if (!try_module_get(policy->governor->owner))
 			return -EINVAL;
 
-	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
-		 policy->cpu, event);
+	pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
 
 	mutex_lock(&cpufreq_governor_lock);
 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cddc619..3af9dd7 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -260,24 +260,31 @@
 		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
 
-#define PCT_TO_HWP(x) (x * 255 / 100)
 static void intel_pstate_hwp_set(void)
 {
-	int min, max, cpu;
-	u64 value, freq;
+	int min, hw_min, max, hw_max, cpu, range, adj_range;
+	u64 value, cap;
+
+	rdmsrl(MSR_HWP_CAPABILITIES, cap);
+	hw_min = HWP_LOWEST_PERF(cap);
+	hw_max = HWP_HIGHEST_PERF(cap);
+	range = hw_max - hw_min;
 
 	get_online_cpus();
 
 	for_each_online_cpu(cpu) {
 		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-		min = PCT_TO_HWP(limits.min_perf_pct);
+		adj_range = limits.min_perf_pct * range / 100;
+		min = hw_min + adj_range;
 		value &= ~HWP_MIN_PERF(~0L);
 		value |= HWP_MIN_PERF(min);
 
-		max = PCT_TO_HWP(limits.max_perf_pct);
+		adj_range = limits.max_perf_pct * range / 100;
+		max = hw_min + adj_range;
 		if (limits.no_turbo) {
-			rdmsrl( MSR_HWP_CAPABILITIES, freq);
-			max = HWP_GUARANTEED_PERF(freq);
+			hw_max = HWP_GUARANTEED_PERF(cap);
+			if (hw_max < max)
+				max = hw_max;
 		}
 
 		value &= ~HWP_MAX_PERF(~0L);
@@ -423,6 +430,8 @@
 
 	limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+	limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
 	if (hwp_active)
@@ -442,6 +451,8 @@
 
 	limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
 	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
+	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
 	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
 	if (hwp_active)
@@ -989,12 +1000,19 @@
 
 	limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
 	limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
-	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
-	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
-
 	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
 	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+
+	/* Normalize user input to [min_policy_pct, max_policy_pct] */
+	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+
+	/* Make sure min_perf_pct <= max_perf_pct */
+	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
+
+	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
 	if (hwp_active)
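A worked example of the new HWP scaling, with illustrative values only: if MSR_HWP_CAPABILITIES reports hw_min = 8 and hw_max = 40, the range is 32, so a 25% limit maps to 8 + 25 * 32 / 100 = 16 and a 100% limit maps to 40, both guaranteed to land inside the hardware's capability window. The old PCT_TO_HWP(25) = 25 * 255 / 100 = 63 ignored that window entirely and could exceed hw_max. The mapping the driver now open-codes per limit amounts to:

	/* illustrative helper only; not part of the patch */
	static int pct_to_hwp_perf(int pct, int hw_min, int hw_max)
	{
		return hw_min + pct * (hw_max - hw_min) / 100;
	}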
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 1523e2d..344058f 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -187,6 +187,28 @@
 }
 
 /**
+ * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
+ * @drv: struct cpuidle_driver for the platform
+ *
+ * Returns 0 for valid state values, a negative error code otherwise:
+ *  * -EINVAL if any coupled state (safe_state_index) is wrongly set.
+ */
+int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
+{
+	int i;
+
+	for (i = drv->state_count - 1; i >= 0; i--) {
+		if (cpuidle_state_is_coupled(drv, i) &&
+		    (drv->safe_state_index == i ||
+		     drv->safe_state_index < 0 ||
+		     drv->safe_state_index >= drv->state_count))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
  * cpuidle_coupled_set_ready - mark a cpu as ready
  * @coupled: the struct coupled that contains the current cpu
  */
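For illustration, this is the kind of table the new check rejects: a coupled idle driver whose safe_state_index points at a state that is itself coupled, or lies out of range. A hypothetical, simplified example (no ->enter callbacks shown), not taken from any real platform:

	static struct cpuidle_driver foo_idle_driver = {
		.name             = "foo_idle",
		.state_count      = 2,
		.safe_state_index = 1,	/* invalid: state 1 is a coupled state */
		.states = {
			[0] = { .name = "wfi",         .exit_latency = 1 },
			[1] = { .name = "coupled-off", .exit_latency = 500,
				.flags = CPUIDLE_FLAG_COUPLED },
		},
	};

The registration-time check added in driver.c below then rejects such a table with -EINVAL, before the coupled idle path can ever use the bogus safe state.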
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 178c5ad..f87f399 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -35,6 +35,7 @@
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state);
+int cpuidle_coupled_state_verify(struct cpuidle_driver *drv);
 int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int next_state);
 int cpuidle_coupled_register_device(struct cpuidle_device *dev);
@@ -46,6 +47,11 @@
 	return false;
 }
 
+static inline int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
+{
+	return 0;
+}
+
 static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int next_state)
 {
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 5db1478..389ade4 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -227,6 +227,10 @@
 	if (!drv || !drv->state_count)
 		return -EINVAL;
 
+	ret = cpuidle_coupled_state_verify(drv);
+	if (ret)
+		return ret;
+
 	if (cpuidle_disabled())
 		return -ENODEV;
 
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 07bc7aa..d234719 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -461,7 +461,7 @@
 
 config CRYPTO_DEV_VMX
 	bool "Support for VMX cryptographic acceleration instructions"
-	depends on PPC64
+	depends on PPC64 && VSX
 	help
 	  Support for VMX cryptographic acceleration instructions.
 
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index b60698b..bc2a55b 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -687,6 +687,33 @@
 
 int mv_cesa_queue_req(struct crypto_async_request *req);
 
+/*
+ * Helper function that indicates whether a crypto request needs to be
+ * cleaned up or not after being enqueued using mv_cesa_queue_req().
+ */
+static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
+					    int ret)
+{
+	/*
+	 * The queue still had some space, the request was queued
+	 * normally, so there's no need to clean it up.
+	 */
+	if (ret == -EINPROGRESS)
+		return false;
+
+	/*
+	 * The queue had no space left, but since the request is
+	 * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
+	 * the backlog and will be processed later. There's no need to
+	 * clean it up.
+	 */
+	if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+		return false;
+
+	/* Request wasn't queued, we need to clean it up */
+	return true;
+}
+
 /* TDMA functions */
 
 static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,
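The cipher and hash call sites below all switch to the same pattern (shown here for the ablkcipher path), so a request that went onto the crypto backlog is no longer cleaned up behind the engine's back:

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;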
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 0745cf3..3df2f4e 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -189,7 +189,6 @@
 {
 	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
 	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
-
 	creq->req.base.engine = engine;
 
 	if (creq->req.base.type == CESA_DMA_REQ)
@@ -431,7 +430,7 @@
 		return ret;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ablkcipher_cleanup(req);
 
 	return ret;
@@ -551,7 +550,7 @@
 		return ret;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ablkcipher_cleanup(req);
 
 	return ret;
@@ -693,7 +692,7 @@
 		return ret;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ablkcipher_cleanup(req);
 
 	return ret;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index ae9272e..e8d0d71 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -739,10 +739,8 @@
 		return 0;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS) {
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ahash_cleanup(req);
-		return ret;
-	}
 
 	return ret;
 }
@@ -766,7 +764,7 @@
 		return 0;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ahash_cleanup(req);
 
 	return ret;
@@ -791,7 +789,7 @@
 		return 0;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ahash_cleanup(req);
 
 	return ret;
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index a57b419..0a5ca0b 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -88,6 +88,9 @@
 	struct pci_dev *parent = pdev->bus->self;
 	uint16_t bridge_ctl = 0;
 
+	if (accel_dev->is_vf)
+		return;
+
 	dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
 		 accel_dev->accel_id);
 
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index e070c31..a19ee12 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -104,7 +104,7 @@
 			sg_miter_next(&mo);
 			oo = 0;
 		}
-	} while (mo.length > 0);
+	} while (oleft > 0);
 
 	if (areq->info) {
 		for (i = 0; i < 4 && i < ivsize / 4; i++) {
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index ca1b362..3927ed9 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -53,7 +53,7 @@
 {
 	struct devfreq *tmp_devfreq;
 
-	if (unlikely(IS_ERR_OR_NULL(dev))) {
+	if (IS_ERR_OR_NULL(dev)) {
 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
 		return ERR_PTR(-EINVAL);
 	}
@@ -133,7 +133,7 @@
 {
 	struct devfreq_governor *tmp_governor;
 
-	if (unlikely(IS_ERR_OR_NULL(name))) {
+	if (IS_ERR_OR_NULL(name)) {
 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
 		return ERR_PTR(-EINVAL);
 	}
@@ -177,10 +177,10 @@
 		return err;
 
 	/*
-	 * Adjust the freuqency with user freq and QoS.
+	 * Adjust the frequency with user freq and QoS.
 	 *
-	 * List from the highest proiority
-	 * max_freq (probably called by thermal when it's too hot)
+	 * List from the highest priority
+	 * max_freq
 	 * min_freq
 	 */
 
@@ -482,7 +482,7 @@
 						devfreq->profile->max_state *
 						devfreq->profile->max_state,
 						GFP_KERNEL);
-	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
+	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
 						devfreq->profile->max_state,
 						GFP_KERNEL);
 	devfreq->last_stat_updated = jiffies;
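The size fix above works; a slightly more robust idiom (a sketch, not part of the patch) is to size the allocation from the destination pointer, so the element type can never drift out of sync with the struct definition again:

	devfreq->time_in_state = devm_kzalloc(dev,
				sizeof(*devfreq->time_in_state) *
				devfreq->profile->max_state,
				GFP_KERNEL);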
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index f9901f5..f312485 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -319,7 +319,8 @@
 	case PPMU_PMNCNT3:
 		pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
 		pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
-		load_count = (u64)((pmcnt_high & 0xff) << 32) + (u64)pmcnt_low;
+		load_count = ((u64)((pmcnt_high & 0xff)) << 32)
+			   + (u64)pmcnt_low;
 		break;
 	}
 	edata->load_count = load_count;
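The extra parentheses matter because the cast has to happen before the shift: shifting a 32-bit value left by 32 is undefined behaviour in C. A small illustration with made-up counter values:

	u32 hi = 0x12, lo = 0x345678;
	u64 bad  = (u64)((hi & 0xff) << 32) + (u64)lo;	/* 32-bit value shifted by 32: undefined */
	u64 good = ((u64)(hi & 0xff) << 32) + (u64)lo;	/* 0x1200345678, as intended */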
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index 0720ba8..ae72ba5 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -21,17 +21,20 @@
 static int devfreq_simple_ondemand_func(struct devfreq *df,
 					unsigned long *freq)
 {
-	struct devfreq_dev_status stat;
-	int err = df->profile->get_dev_status(df->dev.parent, &stat);
+	int err;
+	struct devfreq_dev_status *stat;
 	unsigned long long a, b;
 	unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
 	unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
 	struct devfreq_simple_ondemand_data *data = df->data;
 	unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
 
+	err = devfreq_update_stats(df);
 	if (err)
 		return err;
 
+	stat = &df->last_status;
+
 	if (data) {
 		if (data->upthreshold)
 			dfso_upthreshold = data->upthreshold;
@@ -43,41 +46,41 @@
 		return -EINVAL;
 
 	/* Assume MAX if it is going to be divided by zero */
-	if (stat.total_time == 0) {
+	if (stat->total_time == 0) {
 		*freq = max;
 		return 0;
 	}
 
 	/* Prevent overflow */
-	if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) {
-		stat.busy_time >>= 7;
-		stat.total_time >>= 7;
+	if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) {
+		stat->busy_time >>= 7;
+		stat->total_time >>= 7;
 	}
 
 	/* Set MAX if it's busy enough */
-	if (stat.busy_time * 100 >
-	    stat.total_time * dfso_upthreshold) {
+	if (stat->busy_time * 100 >
+	    stat->total_time * dfso_upthreshold) {
 		*freq = max;
 		return 0;
 	}
 
 	/* Set MAX if we do not know the initial frequency */
-	if (stat.current_frequency == 0) {
+	if (stat->current_frequency == 0) {
 		*freq = max;
 		return 0;
 	}
 
 	/* Keep the current frequency */
-	if (stat.busy_time * 100 >
-	    stat.total_time * (dfso_upthreshold - dfso_downdifferential)) {
-		*freq = stat.current_frequency;
+	if (stat->busy_time * 100 >
+	    stat->total_time * (dfso_upthreshold - dfso_downdifferential)) {
+		*freq = stat->current_frequency;
 		return 0;
 	}
 
 	/* Set the desired frequency based on the load */
-	a = stat.busy_time;
-	a *= stat.current_frequency;
-	b = div_u64(a, stat.total_time);
+	a = stat->busy_time;
+	a *= stat->current_frequency;
+	b = div_u64(a, stat->total_time);
 	b *= 100;
 	b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
 	*freq = (unsigned long) b;
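The formula itself is unchanged; only the source of the statistics moves into struct devfreq. As a worked example, assuming the default thresholds of 90/5 and hypothetical sample values busy_time = 60, total_time = 100, current_frequency = 800000 kHz: a = 60 * 800000 = 48000000, b = a / 100 = 480000, then b * 100 / (90 - 5/2) = 48000000 / 88 ≈ 545454 kHz, i.e. the governor requests the lowest rate at which the observed load would still sit just under the upthreshold window.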
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 13a1a6e..848b93ee 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -541,18 +541,20 @@
 static int tegra_governor_get_target(struct devfreq *devfreq,
 				     unsigned long *freq)
 {
-	struct devfreq_dev_status stat;
+	struct devfreq_dev_status *stat;
 	struct tegra_devfreq *tegra;
 	struct tegra_devfreq_device *dev;
 	unsigned long target_freq = 0;
 	unsigned int i;
 	int err;
 
-	err = devfreq->profile->get_dev_status(devfreq->dev.parent, &stat);
+	err = devfreq_update_stats(devfreq);
 	if (err)
 		return err;
 
-	tegra = stat.private_data;
+	stat = &devfreq->last_status;
+
+	tegra = stat->private_data;
 
 	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
 		dev = &tegra->devices[i];
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 4768a82..2bf37e6 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -266,7 +266,7 @@
 }
 
 /* Chained IRQ handler for IPU function and error interrupt */
-static void ipu_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void ipu_irq_handler(struct irq_desc *desc)
 {
 	struct ipu *ipu = irq_desc_get_handler_data(desc);
 	u32 status;
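This is part of the tree-wide conversion, continued in the GPIO drivers below, that drops the redundant irq argument from chained flow handlers. A hypothetical handler written against the new signature, assuming <linux/irq.h> and <linux/irqchip/chained_irq.h> and with the demux details left as placeholders:

	static void foo_demux_handler(struct irq_desc *desc)
	{
		unsigned int parent = irq_desc_get_irq(desc);	/* the former 'irq' argument */
		struct irq_chip *chip = irq_desc_get_chip(desc);

		chained_irq_enter(chip, desc);
		pr_debug("demuxing children of parent irq %u\n", parent);
		/* read the controller status and generic_handle_irq() each pending child */
		chained_irq_exit(chip, desc);
	}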
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index ca78311..cf1268d 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -280,6 +280,7 @@
 	u8		max_interleave;
 	u8		(*get_node_id)(struct sbridge_pvt *pvt);
 	enum mem_type	(*get_memory_type)(struct sbridge_pvt *pvt);
+	enum dev_type	(*get_width)(struct sbridge_pvt *pvt, u32 mtr);
 	struct pci_dev	*pci_vtd;
 };
 
@@ -471,6 +472,9 @@
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
 static const struct pci_id_descr pci_dev_descr_haswell[] = {
 	/* first item must be the HA */
 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0)		},
@@ -488,6 +492,9 @@
 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1)	},
 
 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1)		},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1)		},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1)		},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1)		},
 
 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1)		},
 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1)	},
@@ -762,6 +769,49 @@
 	return mtype;
 }
 
+static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
+{
+	/* there's no way to figure out */
+	return DEV_UNKNOWN;
+}
+
+static enum dev_type __ibridge_get_width(u32 mtr)
+{
+	enum dev_type type;
+
+	switch (mtr) {
+	case 3:
+		type = DEV_UNKNOWN;
+		break;
+	case 2:
+		type = DEV_X16;
+		break;
+	case 1:
+		type = DEV_X8;
+		break;
+	case 0:
+		type = DEV_X4;
+		break;
+	}
+
+	return type;
+}
+
+static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
+{
+	/*
+	 * ddr3_width in the documentation but also valid for DDR4 on
+	 * Haswell
+	 */
+	return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
+}
+
+static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
+{
+	/* ddr3_width in the documentation but also valid for DDR4 */
+	return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
+}
+
 static u8 get_node_id(struct sbridge_pvt *pvt)
 {
 	u32 reg;
@@ -966,17 +1016,7 @@
 
 				dimm->nr_pages = npages;
 				dimm->grain = 32;
-				switch (banks) {
-				case 16:
-					dimm->dtype = DEV_X16;
-					break;
-				case 8:
-					dimm->dtype = DEV_X8;
-					break;
-				case 4:
-					dimm->dtype = DEV_X4;
-					break;
-				}
+				dimm->dtype = pvt->info.get_width(pvt, mtr);
 				dimm->mtype = mtype;
 				dimm->edac_mode = mode;
 				snprintf(dimm->label, sizeof(dimm->label),
@@ -1869,7 +1909,11 @@
 		}
 			break;
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
-			pvt->pci_ddrio = pdev;
+		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
+		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
+		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
+			if (!pvt->pci_ddrio)
+				pvt->pci_ddrio = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
 			pvt->pci_ha1 = pdev;
@@ -2361,6 +2405,7 @@
 		pvt->info.interleave_list = ibridge_interleave_list;
 		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
+		pvt->info.get_width = ibridge_get_width;
 		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx);
 
 		/* Store pci devices at mci for faster access */
@@ -2380,6 +2425,7 @@
 		pvt->info.interleave_list = sbridge_interleave_list;
 		pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
 		pvt->info.interleave_pkg = sbridge_interleave_pkg;
+		pvt->info.get_width = sbridge_get_width;
 		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
 
 		/* Store pci devices at mci for faster access */
@@ -2399,6 +2445,7 @@
 		pvt->info.interleave_list = ibridge_interleave_list;
 		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
+		pvt->info.get_width = ibridge_get_width;
 		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx);
 
 		/* Store pci devices at mci for faster access */
@@ -2418,6 +2465,7 @@
 		pvt->info.interleave_list = ibridge_interleave_list;
 		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
+		pvt->info.get_width = broadwell_get_width;
 		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx);
 
 		/* Store pci devices at mci for faster access */
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index a07addd..8dd0af1 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -159,7 +159,7 @@
 static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
 {
 	if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
-		*attached = new ? true : false;
+		*attached = ((new >> idx) & 0x1) ? true : false;
 		return true;
 	}
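The old test answered "is any cable attached?" rather than "is this cable attached?". As a worked example with hypothetical values prev = 0x3, new = 0x1 and idx = 1: bit 1 has just gone from 1 to 0, so *attached must be false, yet new ? true : false evaluates to true because the bitmask as a whole is still non-zero; (new >> idx) & 0x1 correctly yields 0.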
 
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index d8de6a8..665efca 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -139,6 +139,14 @@
 	bool
 	depends on ARM || ARM64
 
+config QCOM_SCM_32
+	def_bool y
+	depends on QCOM_SCM && ARM
+
+config QCOM_SCM_64
+	def_bool y
+	depends on QCOM_SCM && ARM64
+
 source "drivers/firmware/broadcom/Kconfig"
 source "drivers/firmware/google/Kconfig"
 source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 000830f..2ee8347 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -13,7 +13,8 @@
 obj-$(CONFIG_ISCSI_IBFT)	+= iscsi_ibft.o
 obj-$(CONFIG_FIRMWARE_MEMMAP)	+= memmap.o
 obj-$(CONFIG_QCOM_SCM)		+= qcom_scm.o
-obj-$(CONFIG_QCOM_SCM)		+= qcom_scm-32.o
+obj-$(CONFIG_QCOM_SCM_64)	+= qcom_scm-64.o
+obj-$(CONFIG_QCOM_SCM_32)	+= qcom_scm-32.o
 CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
 
 obj-y				+= broadcom/
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index e334a01..6b6548f 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -5,10 +5,6 @@
 /* error code which can't be mistaken for valid address */
 #define EFI_ERROR	(~0UL)
 
-#undef memcpy
-#undef memset
-#undef memmove
-
 void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
 
 efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
new file mode 100644
index 0000000..bb6555f
--- /dev/null
+++ b/drivers/firmware/qcom_scm-64.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/qcom_scm.h>
+
+/**
+ * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
+ */
+int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+	return -ENOTSUPP;
+}
+
+/**
+ * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
+ */
+int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+{
+	return -ENOTSUPP;
+}
+
+/**
+ * qcom_scm_cpu_power_down() - Power down the cpu
+ * @flags - Flags to flush cache
+ *
+ * This is an end point to power down cpu. If there was a pending interrupt,
+ * the control would return from this function, otherwise, the cpu jumps to the
+ * warm boot entry point set for this cpu upon reset.
+ */
+void __qcom_scm_cpu_power_down(u32 flags)
+{
+}
+
+int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+	return -ENOTSUPP;
+}
+
+int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+	return -ENOTSUPP;
+}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b4fc9e4..8949b3f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -356,7 +356,7 @@
 
 config GPIO_RCAR
 	tristate "Renesas R-Car GPIO"
-	depends on ARM && (ARCH_SHMOBILE || COMPILE_TEST)
+	depends on ARCH_SHMOBILE || COMPILE_TEST
 	select GPIOLIB_IRQCHIP
 	help
 	  Say yes here to support GPIO on Renesas R-Car SoCs.
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 9b7e0b3d..1b44941 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -201,8 +201,7 @@
 	return 0;
 }
 
-static void altera_gpio_irq_edge_handler(unsigned int irq,
-					struct irq_desc *desc)
+static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
 {
 	struct altera_gpio_chip *altera_gc;
 	struct irq_chip *chip;
@@ -231,8 +230,7 @@
 }
 
 
-static void altera_gpio_irq_leveL_high_handler(unsigned int irq,
-					      struct irq_desc *desc)
+static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
 {
 	struct altera_gpio_chip *altera_gc;
 	struct irq_chip *chip;
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 31b90ac..33a1f97 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -433,7 +433,7 @@
 	return 0;
 }
 
-static void bcm_kona_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
 {
 	void __iomem *reg_base;
 	int bit, bank_id;
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 9ea86d2..4c64627 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -236,7 +236,7 @@
 }
 
 /* Each UPG GIO block has one IRQ for all banks */
-static void brcmstb_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void brcmstb_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 94b0ab7..5e71538 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -326,8 +326,7 @@
 	.flags		= IRQCHIP_SET_TYPE_MASKED,
 };
 
-static void
-gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
+static void gpio_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 	struct davinci_gpio_regs __iomem *g;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index c5be4b9..fcd5b0a 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -147,7 +147,7 @@
 	return ret;
 }
 
-static void dwapb_irq_handler(u32 irq, struct irq_desc *desc)
+static void dwapb_irq_handler(struct irq_desc *desc)
 {
 	struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 9d90366..3e3947b 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -78,7 +78,7 @@
 		EP93XX_GPIO_REG(int_debounce_register_offset[port]));
 }
 
-static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
 {
 	unsigned char status;
 	int i;
@@ -100,8 +100,7 @@
 	}
 }
 
-static void ep93xx_gpio_f_irq_handler(unsigned int __irq,
-				      struct irq_desc *desc)
+static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
 {
 	/*
 	 * map discontiguous hw irq range to continuous sw irq range:
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index aa28c65..7009747 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -301,7 +301,7 @@
 };
 MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
 
-static void intel_mid_irq_handler(unsigned irq, struct irq_desc *desc)
+static void intel_mid_irq_handler(struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct intel_mid_gpio *priv = to_intel_gpio_priv(gc);
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 153af46..127c37b 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -234,7 +234,7 @@
 	return 0;
 }
 
-static void lp_gpio_irq_handler(unsigned hwirq, struct irq_desc *desc)
+static void lp_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 8ef7a12..48ef368 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -194,7 +194,7 @@
 		return -ENXIO;
 }
 
-static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
+static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc)
 {
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index 7bcfb87..22523aa 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -232,7 +232,7 @@
 	.irq_bus_sync_unlock	= msic_bus_sync_unlock,
 };
 
-static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void msic_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct msic_gpio *mg = irq_data_get_irq_handler_data(data);
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index d2012cf..4b42221 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -305,7 +305,7 @@
  * which have been set as summary IRQ lines and which are triggered,
  * and to call their interrupt handlers.
  */
-static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void msm_summary_irq_handler(struct irq_desc *desc)
 {
 	unsigned long i;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index b396bf3..df418b8 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -458,7 +458,7 @@
 	return 0;
 }
 
-static void mvebu_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void mvebu_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index b752b56..b8dd847 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -272,7 +272,7 @@
 }
 
 /* MX1 and MX3 has one interrupt *per* gpio port */
-static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+static void mx3_gpio_irq_handler(struct irq_desc *desc)
 {
 	u32 irq_stat;
 	struct mxc_gpio_port *port = irq_desc_get_handler_data(desc);
@@ -288,7 +288,7 @@
 }
 
 /* MX2 has one interrupt *for all* gpio ports */
-static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+static void mx2_gpio_irq_handler(struct irq_desc *desc)
 {
 	u32 irq_msk, irq_stat;
 	struct mxc_gpio_port *port;
@@ -339,13 +339,15 @@
 	return 0;
 }
 
-static void mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
+static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
 {
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
 
 	gc = irq_alloc_generic_chip("gpio-mxc", 1, irq_base,
 				    port->base, handle_level_irq);
+	if (!gc)
+		return -ENOMEM;
 	gc->private = port;
 
 	ct = gc->chip_types;
@@ -360,6 +362,8 @@
 
 	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
 			       IRQ_NOREQUEST, 0);
+
+	return 0;
 }
 
 static void mxc_gpio_get_hw(struct platform_device *pdev)
@@ -477,12 +481,16 @@
 	}
 
 	/* gpio-mxc can be a generic irq chip */
-	mxc_gpio_init_gc(port, irq_base);
+	err = mxc_gpio_init_gc(port, irq_base);
+	if (err < 0)
+		goto out_irqdomain_remove;
 
 	list_add_tail(&port->node, &mxc_gpio_ports);
 
 	return 0;
 
+out_irqdomain_remove:
+	irq_domain_remove(port->domain);
 out_irqdesc_free:
 	irq_free_descs(irq_base, 32);
 out_gpiochip_remove:
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b7f383e..a4288f4 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -154,7 +154,7 @@
 }
 
 /* MXS has one interrupt *per* gpio port */
-static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+static void mxs_gpio_irq_handler(struct irq_desc *desc)
 {
 	u32 irq_stat;
 	struct mxs_gpio_port *port = irq_desc_get_handler_data(desc);
@@ -196,13 +196,16 @@
 	return 0;
 }
 
-static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
+static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
 {
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
 
 	gc = irq_alloc_generic_chip("gpio-mxs", 1, irq_base,
 				    port->base, handle_level_irq);
+	if (!gc)
+		return -ENOMEM;
+
 	gc->private = port;
 
 	ct = gc->chip_types;
@@ -216,6 +219,8 @@
 
 	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
 			       IRQ_NOREQUEST, 0);
+
+	return 0;
 }
 
 static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -317,7 +322,9 @@
 	}
 
 	/* gpio-mxs can be a generic irq chip */
-	mxs_gpio_init_gc(port, irq_base);
+	err = mxs_gpio_init_gc(port, irq_base);
+	if (err < 0)
+		goto out_irqdomain_remove;
 
 	/* setup one handler for each entry */
 	irq_set_chained_handler_and_data(port->irq, mxs_gpio_irq_handler,
@@ -343,6 +350,8 @@
 
 out_bgpio_remove:
 	bgpio_remove(&port->bgc);
+out_irqdomain_remove:
+	irq_domain_remove(port->domain);
 out_irqdesc_free:
 	irq_free_descs(irq_base, 32);
 	return err;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 2ae0d47..5236db1 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -709,7 +709,7 @@
  * line's interrupt handler has been run, we may miss some nested
  * interrupts.
  */
-static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void omap_gpio_irq_handler(struct irq_desc *desc)
 {
 	void __iomem *isr_reg = NULL;
 	u32 isr;
@@ -1098,7 +1098,6 @@
 	} else {
 		bank->chip.label = "gpio";
 		bank->chip.base = gpio;
-		gpio += bank->width;
 	}
 	bank->chip.ngpio = bank->width;
 
@@ -1108,6 +1107,9 @@
 		return ret;
 	}
 
+	if (!bank->is_mpuio)
+		gpio += bank->width;
+
 #ifdef CONFIG_ARCH_OMAP1
 	/*
 	 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
@@ -1253,8 +1255,11 @@
 	omap_gpio_mod_init(bank);
 
 	ret = omap_gpio_chip_init(bank, irqc);
-	if (ret)
+	if (ret) {
+		pm_runtime_put_sync(bank->dev);
+		pm_runtime_disable(bank->dev);
 		return ret;
+	}
 
 	omap_gpio_show_rev(bank);
 
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 0475613..229ef65 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -187,7 +187,7 @@
 	return 0;
 }
 
-static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
+static void pl061_irq_handler(struct irq_desc *desc)
 {
 	unsigned long pending;
 	int offset;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 55a11de..df2ce55 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -401,7 +401,7 @@
 	return 0;
 }
 
-static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
+static void pxa_gpio_demux_handler(struct irq_desc *desc)
 {
 	struct pxa_gpio_chip *c;
 	int loop, gpio, gpio_base, n;
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 67bd2f5..990fa90 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -172,8 +172,7 @@
  * irq_controller_lock held, and IRQs disabled.  Decode the IRQ
  * and call the handler.
  */
-static void
-sa1100_gpio_handler(unsigned int __irq, struct irq_desc *desc)
+static void sa1100_gpio_handler(struct irq_desc *desc)
 {
 	unsigned int irq, mask;
 
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index 458d9d7..9c6b967 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -706,4 +706,3 @@
 MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
 MODULE_DESCRIPTION("Driver for Semtech SX150X I2C GPIO Expanders");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("i2c:sx150x");
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 9b14aaf..027e5f4 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -266,7 +266,7 @@
 	gpiochip_unlock_as_irq(&tegra_gpio_chip, gpio);
 }
 
-static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void tegra_gpio_irq_handler(struct irq_desc *desc)
 {
 	int port;
 	int pin;
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 5a49205..30653e6 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -192,7 +192,7 @@
 	return ret;
 }
 
-static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
+static void timbgpio_irq(struct irq_desc *desc)
 {
 	struct timbgpio *tgpio = irq_desc_get_handler_data(desc);
 	struct irq_data *data = irq_desc_get_irq_data(desc);
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index bbac92a..87bb1b1 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -375,7 +375,7 @@
 #define gpio_set_irq_wake NULL
 #endif
 
-static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void tz1090_gpio_irq_handler(struct irq_desc *desc)
 {
 	irq_hw_number_t hw;
 	unsigned int irq_stat, irq_no;
@@ -400,7 +400,7 @@
 				== IRQ_TYPE_EDGE_BOTH)
 			tz1090_gpio_irq_next_edge(bank, hw);
 
-		generic_handle_irq_desc(irq_no, child_desc);
+		generic_handle_irq_desc(child_desc);
 	}
 }
 
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 3d5714d..069f9e4 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -120,7 +120,7 @@
 	return pinctrl_gpio_direction_output(chip->base + gpio);
 }
 
-static void vf610_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+static void vf610_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct vf610_gpio_port *port = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -176,9 +176,9 @@
 	port->irqc[d->hwirq] = irqc;
 
 	if (type & IRQ_TYPE_LEVEL_MASK)
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return 0;
 }
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index 12ee196..4b8a269 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -177,7 +177,7 @@
 	return 0;
 }
 
-static void zx_irq_handler(unsigned irq, struct irq_desc *desc)
+static void zx_irq_handler(struct irq_desc *desc)
 {
 	unsigned long pending;
 	int offset;
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 27348e7..1d1a586 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -514,7 +514,7 @@
  * application for that pin.
  * Note: A bug is reported if no handler is set for the gpio pin.
  */
-static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc)
+static void zynq_gpio_irqhandler(struct irq_desc *desc)
 {
 	u32 int_sts, int_enb;
 	unsigned int bank_num;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 980c1f8..5db3445 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1174,15 +1174,16 @@
  * that the GPIO was actually requested.
  */
 
-static bool _gpiod_get_raw_value(const struct gpio_desc *desc)
+static int _gpiod_get_raw_value(const struct gpio_desc *desc)
 {
 	struct gpio_chip	*chip;
-	bool value;
 	int offset;
+	int value;
 
 	chip = desc->chip;
 	offset = gpio_chip_hwgpio(desc);
-	value = chip->get ? chip->get(chip, offset) : false;
+	value = chip->get ? chip->get(chip, offset) : -EIO;
+	value = value < 0 ? value : !!value;
 	trace_gpio_value(desc_to_gpio(desc), 1, value);
 	return value;
 }
@@ -1192,7 +1193,7 @@
  * @desc: gpio whose value will be returned
  *
  * Return the GPIO's raw value, i.e. the value of the physical line disregarding
- * its ACTIVE_LOW status.
+ * its ACTIVE_LOW status, or negative errno on failure.
  *
  * This function should be called from contexts where we cannot sleep, and will
  * complain if the GPIO chip functions potentially sleep.
@@ -1212,7 +1213,7 @@
  * @desc: gpio whose value will be returned
  *
  * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into
- * account.
+ * account, or negative errno on failure.
  *
  * This function should be called from contexts where we cannot sleep, and will
  * complain if the GPIO chip functions potentially sleep.
@@ -1226,6 +1227,9 @@
 	WARN_ON(desc->chip->can_sleep);
 
 	value = _gpiod_get_raw_value(desc);
+	if (value < 0)
+		return value;
+
 	if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
 		value = !value;
 
@@ -1548,7 +1552,7 @@
  * @desc: gpio whose value will be returned
  *
  * Return the GPIO's raw value, i.e. the value of the physical line disregarding
- * its ACTIVE_LOW status.
+ * its ACTIVE_LOW status, or negative errno on failure.
  *
  * This function is to be called from contexts that can sleep.
  */
@@ -1566,7 +1570,7 @@
  * @desc: gpio whose value will be returned
  *
  * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into
- * account.
+ * account, or negative errno on failure.
  *
  * This function is to be called from contexts that can sleep.
  */
@@ -1579,6 +1583,9 @@
 		return 0;
 
 	value = _gpiod_get_raw_value(desc);
+	if (value < 0)
+		return value;
+
 	if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
 		value = !value;
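With the accessors now able to return a negative errno, a consumer that wants to distinguish a read failure from a logical 0 can do so directly. A sketch only, assuming a descriptor obtained with gpiod_get() elsewhere and a hypothetical set_led() helper:

	int val = gpiod_get_value_cansleep(desc);

	if (val < 0)
		return val;	/* e.g. -EIO when the chip has no ->get() callback */

	set_led(!!val);		/* hypothetical consumer action */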
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 668939a..6647fb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -82,6 +82,7 @@
 extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
+extern int amdgpu_enable_semaphores;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
@@ -432,7 +433,7 @@
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
 
-void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   struct amdgpu_irq_src *irq_src,
 				   unsigned irq_type);
@@ -890,7 +891,7 @@
 	struct amdgpu_device		*adev;
 	const struct amdgpu_ring_funcs	*funcs;
 	struct amdgpu_fence_driver	fence_drv;
-	struct amd_gpu_scheduler 	*scheduler;
+	struct amd_gpu_scheduler 	sched;
 
 	spinlock_t              fence_lock;
 	struct mutex		*ring_lock;
@@ -1201,8 +1202,6 @@
 	struct amdgpu_irq_src		priv_inst_irq;
 	/* gfx status */
 	uint32_t gfx_current_status;
-	/* sync signal for const engine */
-	unsigned ce_sync_offs;
 	/* ce ram size*/
 	unsigned ce_ram_size;
 };
@@ -1274,8 +1273,10 @@
 	uint32_t		num_ibs;
 	struct mutex            job_lock;
 	struct amdgpu_user_fence uf;
-	int (*free_job)(struct amdgpu_job *sched_job);
+	int (*free_job)(struct amdgpu_job *job);
 };
+#define to_amdgpu_job(sched_job)		\
+		container_of((sched_job), struct amdgpu_job, base)
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 496ed21..84d68d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -183,7 +183,7 @@
 		return -ENOMEM;
 
 	r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
-			AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
+			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
 	if (r) {
 		dev_err(rdev->dev,
 			"failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 98d59ee..cd639c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -79,7 +79,8 @@
 	int time;
 
 	n = AMDGPU_BENCHMARK_ITERATIONS;
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
+			     NULL, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -91,7 +92,8 @@
 	if (r) {
 		goto out_cleanup;
 	}
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
+			     NULL, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 6b1243f..1c3fc99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -86,7 +86,7 @@
 
 	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
 	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
-			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
 	if (ret)
 		return ret;
 	ret = amdgpu_bo_reserve(bo, false);
@@ -197,7 +197,8 @@
 
 	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
 					  true, domain, flags,
-					  NULL, &placement, &obj);
+					  NULL, &placement, NULL,
+					  &obj);
 	if (ret) {
 		DRM_ERROR("(%d) bo create failed\n", ret);
 		return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 3b355ae..749420f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -154,42 +154,41 @@
 {
 	union drm_amdgpu_cs *cs = data;
 	uint64_t *chunk_array_user;
-	uint64_t *chunk_array = NULL;
+	uint64_t *chunk_array;
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	unsigned size, i;
-	int r = 0;
+	int ret;
 
-	if (!cs->in.num_chunks)
-		goto out;
+	if (cs->in.num_chunks == 0)
+		return 0;
+
+	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+	if (!chunk_array)
+		return -ENOMEM;
 
 	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
 	if (!p->ctx) {
-		r = -EINVAL;
-		goto out;
+		ret = -EINVAL;
+		goto free_chunk;
 	}
+
 	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
 
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
-	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
-	if (chunk_array == NULL) {
-		r = -ENOMEM;
-		goto out;
-	}
-
 	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
-		r = -EFAULT;
-		goto out;
+		ret = -EFAULT;
+		goto put_bo_list;
 	}
 
 	p->nchunks = cs->in.num_chunks;
 	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 			    GFP_KERNEL);
-	if (p->chunks == NULL) {
-		r = -ENOMEM;
-		goto out;
+	if (!p->chunks) {
+		ret = -ENOMEM;
+		goto put_bo_list;
 	}
 
 	for (i = 0; i < p->nchunks; i++) {
@@ -200,8 +199,9 @@
 		chunk_ptr = (void __user *)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				       sizeof(struct drm_amdgpu_cs_chunk))) {
-			r = -EFAULT;
-			goto out;
+			ret = -EFAULT;
+			i--;
+			goto free_partial_kdata;
 		}
 		p->chunks[i].chunk_id = user_chunk.chunk_id;
 		p->chunks[i].length_dw = user_chunk.length_dw;
@@ -212,13 +212,14 @@
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
 		if (p->chunks[i].kdata == NULL) {
-			r = -ENOMEM;
-			goto out;
+			ret = -ENOMEM;
+			i--;
+			goto free_partial_kdata;
 		}
 		size *= sizeof(uint32_t);
 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
-			r = -EFAULT;
-			goto out;
+			ret = -EFAULT;
+			goto free_partial_kdata;
 		}
 
 		switch (p->chunks[i].chunk_id) {
@@ -238,15 +239,15 @@
 				gobj = drm_gem_object_lookup(p->adev->ddev,
 							     p->filp, handle);
 				if (gobj == NULL) {
-					r = -EINVAL;
-					goto out;
+					ret = -EINVAL;
+					goto free_partial_kdata;
 				}
 
 				p->uf.bo = gem_to_amdgpu_bo(gobj);
 				p->uf.offset = fence_data->offset;
 			} else {
-				r = -EINVAL;
-				goto out;
+				ret = -EINVAL;
+				goto free_partial_kdata;
 			}
 			break;
 
@@ -254,19 +255,35 @@
 			break;
 
 		default:
-			r = -EINVAL;
-			goto out;
+			ret = -EINVAL;
+			goto free_partial_kdata;
 		}
 	}
 
 
 	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!p->ibs)
-		r = -ENOMEM;
+	if (!p->ibs) {
+		ret = -ENOMEM;
+		goto free_all_kdata;
+	}
 
-out:
 	kfree(chunk_array);
-	return r;
+	return 0;
+
+free_all_kdata:
+	i = p->nchunks - 1;
+free_partial_kdata:
+	for (; i >= 0; i--)
+		drm_free_large(p->chunks[i].kdata);
+	kfree(p->chunks);
+put_bo_list:
+	if (p->bo_list)
+		amdgpu_bo_list_put(p->bo_list);
+	amdgpu_ctx_put(p->ctx);
+free_chunk:
+	kfree(chunk_array);
+
+	return ret;
 }
 
 /* Returns how many bytes TTM can move per IB.
@@ -321,25 +338,17 @@
 	return max(bytes_moved_threshold, 1024*1024ull);
 }
 
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
+int amdgpu_cs_list_validate(struct amdgpu_device *adev,
+			    struct amdgpu_vm *vm,
+			    struct list_head *validated)
 {
-	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	struct amdgpu_vm *vm = &fpriv->vm;
-	struct amdgpu_device *adev = p->adev;
 	struct amdgpu_bo_list_entry *lobj;
-	struct list_head duplicates;
 	struct amdgpu_bo *bo;
 	u64 bytes_moved = 0, initial_bytes_moved;
 	u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
 	int r;
 
-	INIT_LIST_HEAD(&duplicates);
-	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
-	if (unlikely(r != 0)) {
-		return r;
-	}
-
-	list_for_each_entry(lobj, &p->validated, tv.head) {
+	list_for_each_entry(lobj, validated, tv.head) {
 		bo = lobj->robj;
 		if (!bo->pin_count) {
 			u32 domain = lobj->prefered_domains;
@@ -373,7 +382,6 @@
 					domain = lobj->allowed_domains;
 					goto retry;
 				}
-				ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 				return r;
 			}
 		}
@@ -386,6 +394,7 @@
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_cs_buckets buckets;
+	struct list_head duplicates;
 	bool need_mmap_lock = false;
 	int i, r;
 
@@ -405,8 +414,22 @@
 	if (need_mmap_lock)
 		down_read(&current->mm->mmap_sem);
 
-	r = amdgpu_cs_list_validate(p);
+	INIT_LIST_HEAD(&duplicates);
+	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
+	if (unlikely(r != 0))
+		goto error_reserve;
 
+	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
+	if (r)
+		goto error_validate;
+
+	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
+
+error_validate:
+	if (r)
+		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+
+error_reserve:
 	if (need_mmap_lock)
 		up_read(&current->mm->mmap_sem);
 
@@ -772,15 +795,15 @@
 	return 0;
 }
 
-static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+static int amdgpu_cs_free_job(struct amdgpu_job *job)
 {
 	int i;
-	if (sched_job->ibs)
-		for (i = 0; i < sched_job->num_ibs; i++)
-			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
-	kfree(sched_job->ibs);
-	if (sched_job->uf.bo)
-		drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+	if (job->ibs)
+		for (i = 0; i < job->num_ibs; i++)
+			amdgpu_ib_free(job->adev, &job->ibs[i]);
+	kfree(job->ibs);
+	if (job->uf.bo)
+		drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
 	return 0;
 }
 
@@ -804,7 +827,7 @@
 	r = amdgpu_cs_parser_init(parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(parser, r, false);
+		kfree(parser);
 		up_read(&adev->exclusive_lock);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
@@ -842,7 +865,7 @@
 		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 		if (!job)
 			return -ENOMEM;
-		job->base.sched = ring->scheduler;
+		job->base.sched = &ring->sched;
 		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
 		job->adev = parser->adev;
 		job->ibs = parser->ibs;
@@ -857,7 +880,7 @@
 
 		job->free_job = amdgpu_cs_free_job;
 		mutex_lock(&job->job_lock);
-		r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+		r = amd_sched_entity_push_job(&job->base);
 		if (r) {
 			mutex_unlock(&job->job_lock);
 			amdgpu_cs_free_job(job);
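The amdgpu_cs_parser_init() rework above replaces the single "out" label with a cascade of unwind labels; the occasional i-- before jumping to free_partial_kdata is what keeps the loop from freeing a kdata slot that was never allocated. The general shape of the idiom, as a standalone sketch with hypothetical alloc_item()/free_item() helpers:

	static int setup_items(void *item[], int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			item[i] = alloc_item();
			if (!item[i]) {
				i--;		/* slot i holds nothing to free */
				goto free_partial;
			}
		}
		return 0;

	free_partial:
		for (; i >= 0; i--)
			free_item(item[i]);
		return -ENOMEM;
	}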
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 20cbc4e..e0b80cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,10 +43,10 @@
 		for (i = 0; i < adev->num_rings; i++) {
 			struct amd_sched_rq *rq;
 			if (kernel)
-				rq = &adev->rings[i]->scheduler->kernel_rq;
+				rq = &adev->rings[i]->sched.kernel_rq;
 			else
-				rq = &adev->rings[i]->scheduler->sched_rq;
-			r = amd_sched_entity_init(adev->rings[i]->scheduler,
+				rq = &adev->rings[i]->sched.sched_rq;
+			r = amd_sched_entity_init(&adev->rings[i]->sched,
 						  &ctx->rings[i].entity,
 						  rq, amdgpu_sched_jobs);
 			if (r)
@@ -55,7 +55,7 @@
 
 		if (i < adev->num_rings) {
 			for (j = 0; j < i; j++)
-				amd_sched_entity_fini(adev->rings[j]->scheduler,
+				amd_sched_entity_fini(&adev->rings[j]->sched,
 						      &ctx->rings[j].entity);
 			kfree(ctx);
 			return r;
@@ -75,7 +75,7 @@
 
 	if (amdgpu_enable_scheduler) {
 		for (i = 0; i < adev->num_rings; i++)
-			amd_sched_entity_fini(adev->rings[i]->scheduler,
+			amd_sched_entity_fini(&adev->rings[i]->sched,
 					      &ctx->rings[i].entity);
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6ff6ae9..6068d82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -246,7 +246,7 @@
 		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
 				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-				     NULL, &adev->vram_scratch.robj);
+				     NULL, NULL, &adev->vram_scratch.robj);
 		if (r) {
 			return r;
 		}
@@ -449,7 +449,8 @@
 
 	if (adev->wb.wb_obj == NULL) {
 		r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0,  NULL, &adev->wb.wb_obj);
+				     AMDGPU_GEM_DOMAIN_GTT, 0,  NULL, NULL,
+				     &adev->wb.wb_obj);
 		if (r) {
 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
 			return r;
@@ -1650,9 +1651,11 @@
 	drm_kms_helper_poll_disable(dev);
 
 	/* turn off display hw */
+	drm_modeset_lock_all(dev);
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 	}
+	drm_modeset_unlock_all(dev);
 
 	/* unpin the front buffers */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1747,9 +1750,11 @@
 	if (fbcon) {
 		drm_helper_resume_force_mode(dev);
 		/* turn on display hw */
+		drm_modeset_lock_all(dev);
 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 		}
+		drm_modeset_unlock_all(dev);
 	}
 
 	drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0fcc0bd..adb4835 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -79,6 +79,7 @@
 int amdgpu_enable_scheduler = 0;
 int amdgpu_sched_jobs = 16;
 int amdgpu_sched_hw_submission = 2;
+int amdgpu_enable_semaphores = 1;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -152,6 +153,9 @@
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 
+MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)");
+module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
+
 static struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 1be2bd6..b3fc26c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -609,9 +609,9 @@
  * Init the fence driver for the requested ring (all asics).
  * Helper function for amdgpu_fence_driver_init().
  */
-void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 {
-	int i;
+	int i, r;
 
 	ring->fence_drv.cpu_addr = NULL;
 	ring->fence_drv.gpu_addr = 0;
@@ -625,15 +625,19 @@
 			amdgpu_fence_check_lockup);
 	ring->fence_drv.ring = ring;
 
+	init_waitqueue_head(&ring->fence_drv.fence_queue);
+
 	if (amdgpu_enable_scheduler) {
-		ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
-						   ring->idx,
-						   amdgpu_sched_hw_submission,
-						   (void *)ring->adev);
-		if (!ring->scheduler)
-			DRM_ERROR("Failed to create scheduler on ring %d.\n",
-				  ring->idx);
+		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+				   amdgpu_sched_hw_submission, ring->name);
+		if (r) {
+			DRM_ERROR("Failed to create scheduler on ring %s.\n",
+				  ring->name);
+			return r;
+		}
 	}
+
+	return 0;
 }
 
 /**
@@ -681,8 +685,7 @@
 		wake_up_all(&ring->fence_drv.fence_queue);
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
-		if (ring->scheduler)
-			amd_sched_destroy(ring->scheduler);
+		amd_sched_fini(&ring->sched);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index cbd3a48..7312d72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -127,7 +127,7 @@
 		r = amdgpu_bo_create(adev, adev->gart.table_size,
 				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-				     NULL, &adev->gart.robj);
+				     NULL, NULL, &adev->gart.robj);
 		if (r) {
 			return r;
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 5839fab..7297ca3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -69,7 +69,8 @@
 		}
 	}
 retry:
-	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj);
+	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
+			     flags, NULL, NULL, &robj);
 	if (r) {
 		if (r != -ERESTARTSYS) {
 			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -426,6 +427,10 @@
 					   &args->data.data_size_bytes,
 					   &args->data.flags);
 	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
+		if (args->data.data_size_bytes > sizeof(args->data.data)) {
+			r = -EINVAL;
+			goto unreserve;
+		}
 		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
 		if (!r)
 			r = amdgpu_bo_set_metadata(robj, args->data.data,
@@ -433,6 +438,7 @@
 						   args->data.flags);
 	}
 
+unreserve:
 	amdgpu_bo_unreserve(robj);
 out:
 	drm_gem_object_unreference_unlocked(gobj);
@@ -454,11 +460,12 @@
 	struct ttm_validate_buffer tv, *entry;
 	struct amdgpu_bo_list_entry *vm_bos;
 	struct ww_acquire_ctx ticket;
-	struct list_head list;
+	struct list_head list, duplicates;
 	unsigned domain;
 	int r;
 
 	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
 
 	tv.bo = &bo_va->bo->tbo;
 	tv.shared = true;
@@ -468,7 +475,8 @@
 	if (!vm_bos)
 		return;
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+	/* Provide duplicates to avoid -EALREADY */
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r)
 		goto error_free;
 
@@ -651,7 +659,7 @@
 	int r;
 
 	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
-	args->size = args->pitch * args->height;
+	args->size = (u64)args->pitch * args->height;
 	args->size = ALIGN(args->size, PAGE_SIZE);
 
 	r = amdgpu_gem_object_create(adev, args->size, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 5c8a803..534fc04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -43,7 +43,7 @@
 		r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
 				     PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_GTT, 0,
-				     NULL, &adev->irq.ih.ring_obj);
+				     NULL, NULL, &adev->irq.ih.ring_obj);
 		if (r) {
 			DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 0aba8e9..7c42ff6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -140,7 +140,7 @@
  */
 int amdgpu_irq_postinstall(struct drm_device *dev)
 {
-	dev->max_vblank_count = 0x001fffff;
+	dev->max_vblank_count = 0x00ffffff;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 2236793..8c735f54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -390,7 +390,7 @@
 				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
 	}
 	case AMDGPU_INFO_READ_MMR_REG: {
-		unsigned n, alloc_size = info->read_mmr_reg.count * 4;
+		unsigned n, alloc_size;
 		uint32_t *regs;
 		unsigned se_num = (info->read_mmr_reg.instance >>
 				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
@@ -406,9 +406,10 @@
 		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
 			sh_num = 0xffffffff;
 
-		regs = kmalloc(alloc_size, GFP_KERNEL);
+		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
 		if (!regs)
 			return -ENOMEM;
+		alloc_size = info->read_mmr_reg.count * sizeof(*regs);
 
 		for (i = 0; i < info->read_mmr_reg.count; i++)
 			if (amdgpu_asic_read_register(adev, se_num, sh_num,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 08b09d5..1a7708f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -215,6 +215,7 @@
 				bool kernel, u32 domain, u64 flags,
 				struct sg_table *sg,
 				struct ttm_placement *placement,
+				struct reservation_object *resv,
 				struct amdgpu_bo **bo_ptr)
 {
 	struct amdgpu_bo *bo;
@@ -261,7 +262,7 @@
 	/* Kernel allocation are uninterruptible */
 	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, page_align, !kernel, NULL,
-			acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
+			acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
 		return r;
 	}
@@ -275,7 +276,9 @@
 int amdgpu_bo_create(struct amdgpu_device *adev,
 		     unsigned long size, int byte_align,
 		     bool kernel, u32 domain, u64 flags,
-		     struct sg_table *sg, struct amdgpu_bo **bo_ptr)
+		     struct sg_table *sg,
+		     struct reservation_object *resv,
+		     struct amdgpu_bo **bo_ptr)
 {
 	struct ttm_placement placement = {0};
 	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
@@ -286,11 +289,9 @@
 	amdgpu_ttm_placement_init(adev, &placement,
 				  placements, domain, flags);
 
-	return amdgpu_bo_create_restricted(adev, size, byte_align,
-					   kernel, domain, flags,
-					   sg,
-					   &placement,
-					   bo_ptr);
+	return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+					   domain, flags, sg, &placement,
+					   resv, bo_ptr);
 }
 
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -535,12 +536,10 @@
 	if (metadata == NULL)
 		return -EINVAL;
 
-	buffer = kzalloc(metadata_size, GFP_KERNEL);
+	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
 	if (buffer == NULL)
 		return -ENOMEM;
 
-	memcpy(buffer, metadata, metadata_size);
-
 	kfree(bo->metadata);
 	bo->metadata_flags = flags;
 	bo->metadata = buffer;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 6ea18dc..3c2ff45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -129,12 +129,14 @@
 			    unsigned long size, int byte_align,
 			    bool kernel, u32 domain, u64 flags,
 			    struct sg_table *sg,
+			    struct reservation_object *resv,
 			    struct amdgpu_bo **bo_ptr);
 int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 				unsigned long size, int byte_align,
 				bool kernel, u32 domain, u64 flags,
 				struct sg_table *sg,
 				struct ttm_placement *placement,
+			        struct reservation_object *resv,
 				struct amdgpu_bo **bo_ptr);
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index d9652fe..59f735a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -61,12 +61,15 @@
 							struct dma_buf_attachment *attach,
 							struct sg_table *sg)
 {
+	struct reservation_object *resv = attach->dmabuf->resv;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_bo *bo;
 	int ret;
 
+	ww_mutex_lock(&resv->lock, NULL);
 	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
-			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
+	ww_mutex_unlock(&resv->lock);
 	if (ret)
 		return ERR_PTR(ret);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 9bec914..30dce23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -357,11 +357,11 @@
 		ring->adev = adev;
 		ring->idx = adev->num_rings++;
 		adev->rings[ring->idx] = ring;
-		amdgpu_fence_driver_init_ring(ring);
+		r = amdgpu_fence_driver_init_ring(ring);
+		if (r)
+			return r;
 	}
 
-	init_waitqueue_head(&ring->fence_drv.fence_queue);
-
 	r = amdgpu_wb_get(adev, &ring->rptr_offs);
 	if (r) {
 		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
@@ -407,7 +407,7 @@
 	if (ring->ring_obj == NULL) {
 		r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_GTT, 0,
-				     NULL, &ring->ring_obj);
+				     NULL, NULL, &ring->ring_obj);
 		if (r) {
 			dev_err(adev->dev, "(%d) ring create failed\n", r);
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 74dad27..e907124 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -64,8 +64,8 @@
 		INIT_LIST_HEAD(&sa_manager->flist[i]);
 	}
 
-	r = amdgpu_bo_create(adev, size, align, true,
-			     domain, 0, NULL, &sa_manager->bo);
+	r = amdgpu_bo_create(adev, size, align, true, domain,
+			     0, NULL, NULL, &sa_manager->bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
 		return r;
@@ -145,8 +145,13 @@
 	struct amd_sched_fence *s_fence;
 
 	s_fence = to_amd_sched_fence(f);
-	if (s_fence)
-		return s_fence->scheduler->ring_id;
+	if (s_fence) {
+		struct amdgpu_ring *ring;
+
+		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+		return ring->idx;
+	}
+
 	a_fence = to_amdgpu_fence(f);
 	if (a_fence)
 		return a_fence->ring->idx;
@@ -412,6 +417,26 @@
 }
 
 #if defined(CONFIG_DEBUG_FS)
+
+static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
+
+	if (a_fence)
+		seq_printf(m, " protected by 0x%016llx on ring %d",
+			   a_fence->seq, a_fence->ring->idx);
+
+	if (s_fence) {
+		struct amdgpu_ring *ring;
+
+
+		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+		seq_printf(m, " protected by 0x%016x on ring %d",
+			   s_fence->base.seqno, ring->idx);
+	}
+}
+
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 				  struct seq_file *m)
 {
@@ -428,18 +453,8 @@
 		}
 		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
 			   soffset, eoffset, eoffset - soffset);
-		if (i->fence) {
-			struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
-			struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
-			if (a_fence)
-				seq_printf(m, " protected by 0x%016llx on ring %d",
-					   a_fence->seq, a_fence->ring->idx);
-			if (s_fence)
-				seq_printf(m, " protected by 0x%016x on ring %d",
-					   s_fence->base.seqno,
-					   s_fence->scheduler->ring_id);
-
-		}
+		if (i->fence)
+			amdgpu_sa_bo_dump_fence(i->fence, m);
 		seq_printf(m, "\n");
 	}
 	spin_unlock(&sa_manager->wq.lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index de98fbd..2e946b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -27,63 +27,48 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 {
-	struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
-	return amdgpu_sync_get_fence(&sched_job->ibs->sync);
+	struct amdgpu_job *job = to_amdgpu_job(sched_job);
+	return amdgpu_sync_get_fence(&job->ibs->sync);
 }
 
-static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
 {
-	struct amdgpu_job *sched_job;
-	struct amdgpu_fence *fence;
+	struct amdgpu_fence *fence = NULL;
+	struct amdgpu_job *job;
 	int r;
 
-	if (!job) {
+	if (!sched_job) {
 		DRM_ERROR("job is null\n");
 		return NULL;
 	}
-	sched_job = (struct amdgpu_job *)job;
-	mutex_lock(&sched_job->job_lock);
-	r = amdgpu_ib_schedule(sched_job->adev,
-			       sched_job->num_ibs,
-			       sched_job->ibs,
-			       sched_job->base.owner);
-	if (r)
+	job = to_amdgpu_job(sched_job);
+	mutex_lock(&job->job_lock);
+	r = amdgpu_ib_schedule(job->adev,
+			       job->num_ibs,
+			       job->ibs,
+			       job->base.owner);
+	if (r) {
+		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		goto err;
-	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
+	}
 
-	if (sched_job->free_job)
-		sched_job->free_job(sched_job);
-
-	mutex_unlock(&sched_job->job_lock);
-	return &fence->base;
+	fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence);
 
 err:
-	DRM_ERROR("Run job error\n");
-	mutex_unlock(&sched_job->job_lock);
-	job->sched->ops->process_job(job);
-	return NULL;
-}
+	if (job->free_job)
+		job->free_job(job);
 
-static void amdgpu_sched_process_job(struct amd_sched_job *job)
-{
-	struct amdgpu_job *sched_job;
-
-	if (!job) {
-		DRM_ERROR("job is null\n");
-		return;
-	}
-	sched_job = (struct amdgpu_job *)job;
-	/* after processing job, free memory */
-	fence_put(&sched_job->base.s_fence->base);
-	kfree(sched_job);
+	mutex_unlock(&job->job_lock);
+	fence_put(&job->base.s_fence->base);
+	kfree(job);
+	return fence ? &fence->base : NULL;
 }
 
 struct amd_sched_backend_ops amdgpu_sched_ops = {
 	.dependency = amdgpu_sched_dependency,
 	.run_job = amdgpu_sched_run_job,
-	.process_job = amdgpu_sched_process_job
 };
 
 int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
@@ -100,7 +85,7 @@
 			kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 		if (!job)
 			return -ENOMEM;
-		job->base.sched = ring->scheduler;
+		job->base.sched = &ring->sched;
 		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
 		job->adev = adev;
 		job->ibs = ibs;
@@ -109,7 +94,7 @@
 		mutex_init(&job->job_lock);
 		job->free_job = free_job;
 		mutex_lock(&job->job_lock);
-		r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+		r = amd_sched_entity_push_job(&job->base);
 		if (r) {
 			mutex_unlock(&job->job_lock);
 			kfree(job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 068aeaf..4921de1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -65,8 +65,14 @@
 
 	if (a_fence)
 		return a_fence->ring->adev == adev;
-	if (s_fence)
-		return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+
+	if (s_fence) {
+		struct amdgpu_ring *ring;
+
+		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+		return ring->adev == adev;
+	}
+
 	return false;
 }
 
@@ -251,6 +257,20 @@
 		fence_put(e->fence);
 		kfree(e);
 	}
+
+	if (amdgpu_enable_semaphores)
+		return 0;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_fence *fence = sync->sync_to[i];
+		if (!fence)
+			continue;
+
+		r = fence_wait(&fence->base, false);
+		if (r)
+			return r;
+	}
+
 	return 0;
 }
 
@@ -285,7 +305,8 @@
 			return -EINVAL;
 		}
 
-		if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
+		if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
+		    (count >= AMDGPU_NUM_SYNCS)) {
 			/* not enough room, wait manually */
 			r = fence_wait(&fence->base, false);
 			if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index f80b1a4..4865615 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -59,8 +59,9 @@
 		goto out_cleanup;
 	}
 
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
-			     NULL, &vram_obj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
+			     AMDGPU_GEM_DOMAIN_VRAM, 0,
+			     NULL, NULL, &vram_obj);
 	if (r) {
 		DRM_ERROR("Failed to create VRAM object\n");
 		goto out_cleanup;
@@ -80,7 +81,8 @@
 		struct fence *fence = NULL;
 
 		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
+				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+				     NULL, gtt_obj + i);
 		if (r) {
 			DRM_ERROR("Failed to create GTT object %d\n", i);
 			goto out_lclean;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b5abd5c..364cbe9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -861,7 +861,7 @@
 	r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &adev->stollen_vga_memory);
+			     NULL, NULL, &adev->stollen_vga_memory);
 	if (r) {
 		return r;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 482e667..5cc95f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -247,7 +247,7 @@
 	const struct common_firmware_header *header = NULL;
 
 	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
-			AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo);
+			AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
 	if (err) {
 		dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
 		err = -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2cf6c6b..d0312364 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -156,7 +156,7 @@
 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &adev->uvd.vcpu_bo);
+			     NULL, NULL, &adev->uvd.vcpu_bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
 		return r;
@@ -543,46 +543,60 @@
 		return -EINVAL;
 	}
 
-	if (msg_type == 1) {
+	switch (msg_type) {
+	case 0:
+		/* it's a create msg, calc image size (width * height) */
+		amdgpu_bo_kunmap(bo);
+
+		/* try to alloc a new handle */
+		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+			if (atomic_read(&adev->uvd.handles[i]) == handle) {
+				DRM_ERROR("Handle 0x%x already in use!\n", handle);
+				return -EINVAL;
+			}
+
+			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
+				adev->uvd.filp[i] = ctx->parser->filp;
+				return 0;
+			}
+		}
+
+		DRM_ERROR("No more free UVD handles!\n");
+		return -EINVAL;
+
+	case 1:
 		/* it's a decode msg, calc buffer sizes */
 		r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
 		amdgpu_bo_kunmap(bo);
 		if (r)
 			return r;
 
-	} else if (msg_type == 2) {
+		/* validate the handle */
+		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+			if (atomic_read(&adev->uvd.handles[i]) == handle) {
+				if (adev->uvd.filp[i] != ctx->parser->filp) {
+					DRM_ERROR("UVD handle collision detected!\n");
+					return -EINVAL;
+				}
+				return 0;
+			}
+		}
+
+		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+		return -ENOENT;
+
+	case 2:
 		/* it's a destroy msg, free the handle */
 		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
 			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
 		amdgpu_bo_kunmap(bo);
 		return 0;
-	} else {
-		/* it's a create msg */
-		amdgpu_bo_kunmap(bo);
 
-		if (msg_type != 0) {
-			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
-			return -EINVAL;
-		}
-
-		/* it's a create msg, no special handling needed */
+	default:
+		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+		return -EINVAL;
 	}
-
-	/* create or decode, validate the handle */
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
-		if (atomic_read(&adev->uvd.handles[i]) == handle)
-			return 0;
-	}
-
-	/* handle not found try to alloc a new one */
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
-		if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
-			adev->uvd.filp[i] = ctx->parser->filp;
-			return 0;
-		}
-	}
-
-	DRM_ERROR("No more free UVD handles!\n");
+	BUG();
 	return -EINVAL;
 }
 
@@ -805,10 +819,10 @@
 }
 
 static int amdgpu_uvd_free_job(
-	struct amdgpu_job *sched_job)
+	struct amdgpu_job *job)
 {
-	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
-	kfree(sched_job->ibs);
+	amdgpu_ib_free(job->adev, job->ibs);
+	kfree(job->ibs);
 	return 0;
 }
 
@@ -905,7 +919,7 @@
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &bo);
+			     NULL, NULL, &bo);
 	if (r)
 		return r;
 
@@ -954,7 +968,7 @@
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &bo);
+			     NULL, NULL, &bo);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 3cab96c..74f2038a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -143,7 +143,7 @@
 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &adev->vce.vcpu_bo);
+			     NULL, NULL, &adev->vce.vcpu_bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
 		return r;
@@ -342,10 +342,10 @@
 }
 
 static int amdgpu_vce_free_job(
-	struct amdgpu_job *sched_job)
+	struct amdgpu_job *job)
 {
-	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
-	kfree(sched_job->ibs);
+	amdgpu_ib_free(job->adev, job->ibs);
+	kfree(job->ibs);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f68b7cd..1e14531 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -316,12 +316,12 @@
 	}
 }
 
-int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
+int amdgpu_vm_free_job(struct amdgpu_job *job)
 {
 	int i;
-	for (i = 0; i < sched_job->num_ibs; i++)
-		amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
-	kfree(sched_job->ibs);
+	for (i = 0; i < job->num_ibs; i++)
+		amdgpu_ib_free(job->adev, &job->ibs[i]);
+	kfree(job->ibs);
 	return 0;
 }
 
@@ -686,31 +686,6 @@
 }
 
 /**
- * amdgpu_vm_fence_pts - fence page tables after an update
- *
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @fence: fence to use
- *
- * Fence the page tables in the range @start - @end (cayman+).
- *
- * Global and local mutex must be locked!
- */
-static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
-				uint64_t start, uint64_t end,
-				struct fence *fence)
-{
-	unsigned i;
-
-	start >>= amdgpu_vm_block_size;
-	end >>= amdgpu_vm_block_size;
-
-	for (i = start; i <= end; ++i)
-		amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
-}
-
-/**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
  * @adev: amdgpu_device pointer
@@ -813,8 +788,7 @@
 	if (r)
 		goto error_free;
 
-	amdgpu_vm_fence_pts(vm, mapping->it.start,
-			    mapping->it.last + 1, f);
+	amdgpu_bo_fence(vm->page_directory, f, true);
 	if (fence) {
 		fence_put(*fence);
 		*fence = fence_get(f);
@@ -855,7 +829,7 @@
 	int r;
 
 	if (mem) {
-		addr = mem->start << PAGE_SHIFT;
+		addr = (u64)mem->start << PAGE_SHIFT;
 		if (mem->mem_type != TTM_PL_TT)
 			addr += adev->vm_manager.vram_base_offset;
 	} else {
@@ -1089,6 +1063,7 @@
 
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
+		struct reservation_object *resv = vm->page_directory->tbo.resv;
 		struct amdgpu_bo *pt;
 
 		if (vm->page_tables[pt_idx].bo)
@@ -1097,11 +1072,13 @@
 		/* drop mutex to allocate and clear page table */
 		mutex_unlock(&vm->mutex);
 
+		ww_mutex_lock(&resv->lock, NULL);
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
-				     NULL, &pt);
+				     NULL, resv, &pt);
+		ww_mutex_unlock(&resv->lock);
 		if (r)
 			goto error_free;
 
@@ -1303,7 +1280,7 @@
 	r = amdgpu_bo_create(adev, pd_size, align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
-			     NULL, &vm->page_directory);
+			     NULL, NULL, &vm->page_directory);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index a72ffc7..e33180d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -814,7 +814,8 @@
 	* 3. map kernel virtual address
 	*/
 	ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
-				true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf);
+			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
+			       toc_buf);
 
 	if (ret) {
 		dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
@@ -822,7 +823,8 @@
 	}
 
 	ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
-				true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf);
+			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
+			       smu_buf);
 
 	if (ret) {
 		dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index 322edea..bda1249 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -764,7 +764,7 @@
 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
 			       true, AMDGPU_GEM_DOMAIN_VRAM,
 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, toc_buf);
+			       NULL, NULL, toc_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
 		return -ENOMEM;
@@ -774,7 +774,7 @@
 	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
 			       true, AMDGPU_GEM_DOMAIN_VRAM,
 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, smu_buf);
+			       NULL, NULL, smu_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 4bd1e5c..e992bf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3206,7 +3206,7 @@
 		r = amdgpu_bo_create(adev,
 				     adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
 				     PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
 				     &adev->gfx.mec.hpd_eop_obj);
 		if (r) {
 			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -3373,7 +3373,7 @@
 			r = amdgpu_bo_create(adev,
 					     sizeof(struct bonaire_mqd),
 					     PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+					     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
 					     &ring->mqd_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
@@ -3610,41 +3610,6 @@
 	return 0;
 }
 
-static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
-
-	/* instruct DE to set a magic number */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-							 WRITE_DATA_DST_SEL(5)));
-	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
-	amdgpu_ring_write(ring, 1);
-
-	/* let CE wait till condition satisfied */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
-	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
-							 WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
-							 WAIT_REG_MEM_FUNCTION(3) |  /* == */
-							 WAIT_REG_MEM_ENGINE(2)));   /* ce */
-	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
-	amdgpu_ring_write(ring, 1);
-	amdgpu_ring_write(ring, 0xffffffff);
-	amdgpu_ring_write(ring, 4); /* poll interval */
-
-	/* instruct CE to reset wb of ce_sync to zero */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
-							 WRITE_DATA_DST_SEL(5) |
-							 WR_CONFIRM));
-	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
-	amdgpu_ring_write(ring, 0);
-}
-
 /*
  * vm
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -3663,6 +3628,13 @@
 					unsigned vm_id, uint64_t pd_addr)
 {
 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	if (usepfp) {
+		/* sync CE with ME to prevent CE from fetching CEIB before the context switch is done */
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+	}
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3703,7 +3675,10 @@
 		amdgpu_ring_write(ring, 0x0);
 
 		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
-		gfx_v7_0_ce_sync_me(ring);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
 	}
 }
 
@@ -3788,7 +3763,8 @@
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_VRAM,
 					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-					     NULL, &adev->gfx.rlc.save_restore_obj);
+					     NULL, NULL,
+					     &adev->gfx.rlc.save_restore_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
 				return r;
@@ -3831,7 +3807,8 @@
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_VRAM,
 					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-					     NULL, &adev->gfx.rlc.clear_state_obj);
+					     NULL, NULL,
+					     &adev->gfx.rlc.clear_state_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
 				gfx_v7_0_rlc_fini(adev);
@@ -3870,7 +3847,8 @@
 			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_VRAM,
 					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-					     NULL, &adev->gfx.rlc.cp_table_obj);
+					     NULL, NULL,
+					     &adev->gfx.rlc.cp_table_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
 				gfx_v7_0_rlc_fini(adev);
@@ -4802,12 +4780,6 @@
 		return r;
 	}
 
-	r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
-	if (r) {
-		DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
-		return r;
-	}
-
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 		ring = &adev->gfx.gfx_ring[i];
 		ring->ring_obj = NULL;
@@ -4851,21 +4823,21 @@
 	r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
 			PAGE_SIZE, true,
 			AMDGPU_GEM_DOMAIN_GDS, 0,
-			NULL, &adev->gds.gds_gfx_bo);
+			NULL, NULL, &adev->gds.gds_gfx_bo);
 	if (r)
 		return r;
 
 	r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
 		PAGE_SIZE, true,
 		AMDGPU_GEM_DOMAIN_GWS, 0,
-		NULL, &adev->gds.gws_gfx_bo);
+		NULL, NULL, &adev->gds.gws_gfx_bo);
 	if (r)
 		return r;
 
 	r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
 			PAGE_SIZE, true,
 			AMDGPU_GEM_DOMAIN_OA, 0,
-			NULL, &adev->gds.oa_gfx_bo);
+			NULL, NULL, &adev->gds.oa_gfx_bo);
 	if (r)
 		return r;
 
@@ -4886,8 +4858,6 @@
 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
-	amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
-
 	gfx_v7_0_cp_compute_fini(adev);
 	gfx_v7_0_rlc_fini(adev);
 	gfx_v7_0_mec_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 53f0743..cb4f68f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -868,7 +868,7 @@
 		r = amdgpu_bo_create(adev,
 				     adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
 				     PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
 				     &adev->gfx.mec.hpd_eop_obj);
 		if (r) {
 			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -940,12 +940,6 @@
 		return r;
 	}
 
-	r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
-	if (r) {
-		DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
-		return r;
-	}
-
 	/* set up the gfx ring */
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 		ring = &adev->gfx.gfx_ring[i];
@@ -995,21 +989,21 @@
 	/* reserve GDS, GWS and OA resource for gfx */
 	r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
 			PAGE_SIZE, true,
-			AMDGPU_GEM_DOMAIN_GDS, 0,
+			AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
 			NULL, &adev->gds.gds_gfx_bo);
 	if (r)
 		return r;
 
 	r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
 		PAGE_SIZE, true,
-		AMDGPU_GEM_DOMAIN_GWS, 0,
+		AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
 		NULL, &adev->gds.gws_gfx_bo);
 	if (r)
 		return r;
 
 	r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
 			PAGE_SIZE, true,
-			AMDGPU_GEM_DOMAIN_OA, 0,
+			AMDGPU_GEM_DOMAIN_OA, 0, NULL,
 			NULL, &adev->gds.oa_gfx_bo);
 	if (r)
 		return r;
@@ -1033,8 +1027,6 @@
 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
-	amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
-
 	gfx_v8_0_mec_fini(adev);
 
 	return 0;
@@ -3106,7 +3098,7 @@
 					     sizeof(struct vi_mqd),
 					     PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
-					     &ring->mqd_obj);
+					     NULL, &ring->mqd_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
 				return r;
@@ -3965,6 +3957,7 @@
 			  DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
 	amdgpu_ring_write(ring, lower_32_bits(seq));
 	amdgpu_ring_write(ring, upper_32_bits(seq));
+
 }
 
 /**
@@ -4005,49 +3998,34 @@
 	return true;
 }
 
-static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
-
-	/* instruct DE to set a magic number */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-							 WRITE_DATA_DST_SEL(5)));
-	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
-	amdgpu_ring_write(ring, 1);
-
-	/* let CE wait till condition satisfied */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
-	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
-							 WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
-							 WAIT_REG_MEM_FUNCTION(3) |  /* == */
-							 WAIT_REG_MEM_ENGINE(2)));   /* ce */
-	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
-	amdgpu_ring_write(ring, 1);
-	amdgpu_ring_write(ring, 0xffffffff);
-	amdgpu_ring_write(ring, 4); /* poll interval */
-
-	/* instruct CE to reset wb of ce_sync to zero */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
-							 WRITE_DATA_DST_SEL(5) |
-							 WR_CONFIRM));
-	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
-	amdgpu_ring_write(ring, 0);
-}
-
 static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+		 WAIT_REG_MEM_FUNCTION(3))); /* equal */
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	amdgpu_ring_write(ring, seq);
+	amdgpu_ring_write(ring, 0xffffffff);
+	amdgpu_ring_write(ring, 4); /* poll interval */
+
+	if (usepfp) {
+		/* sync CE with ME to prevent CE from fetching CEIB before the context switch is done */
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+	}
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
-				 WRITE_DATA_DST_SEL(0)));
+				 WRITE_DATA_DST_SEL(0)) |
+				 WR_CONFIRM);
 	if (vm_id < 8) {
 		amdgpu_ring_write(ring,
 				  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
@@ -4083,9 +4061,10 @@
 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 		amdgpu_ring_write(ring, 0x0);
-
-		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
-		gfx_v8_0_ce_sync_me(ring);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index c900aa9..966d4b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -625,7 +625,7 @@
 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
 			       true, AMDGPU_GEM_DOMAIN_VRAM,
 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, toc_buf);
+			       NULL, NULL, toc_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 1f5ac94..5421309 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -763,7 +763,7 @@
 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
 			       true, AMDGPU_GEM_DOMAIN_VRAM,
 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, toc_buf);
+			       NULL, NULL, toc_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
 		return -ENOMEM;
@@ -773,7 +773,7 @@
 	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
 			       true, AMDGPU_GEM_DOMAIN_VRAM,
 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, smu_buf);
+			       NULL, NULL, smu_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 5fac5da..ed50dd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -224,11 +224,11 @@
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = uvd_v4_2_hw_fini(adev);
+	r = amdgpu_uvd_suspend(adev);
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_suspend(adev);
+	r = uvd_v4_2_hw_fini(adev);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 2d5c59c..9ad8b99 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -220,11 +220,11 @@
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = uvd_v5_0_hw_fini(adev);
+	r = amdgpu_uvd_suspend(adev);
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_suspend(adev);
+	r = uvd_v5_0_hw_fini(adev);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d9f553f..7e9934f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -214,14 +214,16 @@
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* Skip this for APU for now */
+	if (!(adev->flags & AMD_IS_APU)) {
+		r = amdgpu_uvd_suspend(adev);
+		if (r)
+			return r;
+	}
 	r = uvd_v6_0_hw_fini(adev);
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_suspend(adev);
-	if (r)
-		return r;
-
 	return r;
 }
 
@@ -230,10 +232,12 @@
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
+	/* Skip this for APU for now */
+	if (!(adev->flags & AMD_IS_APU)) {
+		r = amdgpu_uvd_resume(adev);
+		if (r)
+			return r;
+	}
 	r = uvd_v6_0_hw_init(adev);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 552d9e7..b55ceb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1400,7 +1400,8 @@
 	case CHIP_CARRIZO:
 		adev->has_uvd = true;
 		adev->cg_flags = 0;
-		adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE;
+		/* Disable UVD pg */
+		adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
 		adev->external_rev_id = adev->rev_id + 0x1;
 		if (amdgpu_smc_load_fw && smc_enabled)
 			adev->firmware.smu_load = true;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
new file mode 100644
index 0000000..144f50a
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -0,0 +1,41 @@
+#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _GPU_SCHED_TRACE_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu_sched
+#define TRACE_INCLUDE_FILE gpu_sched_trace
+
+TRACE_EVENT(amd_sched_job,
+	    TP_PROTO(struct amd_sched_job *sched_job),
+	    TP_ARGS(sched_job),
+	    TP_STRUCT__entry(
+			     __field(struct amd_sched_entity *, entity)
+			     __field(const char *, name)
+			     __field(u32, job_count)
+			     __field(int, hw_job_count)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->entity = sched_job->s_entity;
+			   __entry->name = sched_job->sched->name;
+			   __entry->job_count = kfifo_len(
+				   &sched_job->s_entity->job_queue) / sizeof(sched_job);
+			   __entry->hw_job_count = atomic_read(
+				   &sched_job->sched->hw_rq_count);
+			   ),
+	    TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
+		      __entry->entity, __entry->name, __entry->job_count,
+		      __entry->hw_job_count)
+);
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 9259f1b..3697eee 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -27,6 +27,9 @@
 #include <drm/drmP.h>
 #include "gpu_scheduler.h"
 
+#define CREATE_TRACE_POINTS
+#include "gpu_sched_trace.h"
+
 static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
@@ -65,29 +68,29 @@
 amd_sched_rq_select_job(struct amd_sched_rq *rq)
 {
 	struct amd_sched_entity *entity;
-	struct amd_sched_job *job;
+	struct amd_sched_job *sched_job;
 
 	spin_lock(&rq->lock);
 
 	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
-			job = amd_sched_entity_pop_job(entity);
-			if (job) {
+			sched_job = amd_sched_entity_pop_job(entity);
+			if (sched_job) {
 				rq->current_entity = entity;
 				spin_unlock(&rq->lock);
-				return job;
+				return sched_job;
 			}
 		}
 	}
 
 	list_for_each_entry(entity, &rq->entities, list) {
 
-		job = amd_sched_entity_pop_job(entity);
-		if (job) {
+		sched_job = amd_sched_entity_pop_job(entity);
+		if (sched_job) {
 			rq->current_entity = entity;
 			spin_unlock(&rq->lock);
-			return job;
+			return sched_job;
 		}
 
 		if (entity == rq->current_entity)
@@ -115,23 +118,27 @@
 			  struct amd_sched_rq *rq,
 			  uint32_t jobs)
 {
+	int r;
+
 	if (!(sched && entity && rq))
 		return -EINVAL;
 
 	memset(entity, 0, sizeof(struct amd_sched_entity));
-	entity->belongto_rq = rq;
-	entity->scheduler = sched;
-	entity->fence_context = fence_context_alloc(1);
-	if(kfifo_alloc(&entity->job_queue,
-		       jobs * sizeof(void *),
-		       GFP_KERNEL))
-		return -EINVAL;
+	INIT_LIST_HEAD(&entity->list);
+	entity->rq = rq;
+	entity->sched = sched;
 
 	spin_lock_init(&entity->queue_lock);
+	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
+	if (r)
+		return r;
+
 	atomic_set(&entity->fence_seq, 0);
+	entity->fence_context = fence_context_alloc(1);
 
 	/* Add the entity to the run queue */
 	amd_sched_rq_add_entity(rq, entity);
+
 	return 0;
 }
 
@@ -146,8 +153,8 @@
 static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
 					    struct amd_sched_entity *entity)
 {
-	return entity->scheduler == sched &&
-		entity->belongto_rq != NULL;
+	return entity->sched == sched &&
+		entity->rq != NULL;
 }
 
 /**
@@ -177,7 +184,7 @@
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity)
 {
-	struct amd_sched_rq *rq = entity->belongto_rq;
+	struct amd_sched_rq *rq = entity->rq;
 
 	if (!amd_sched_entity_is_initialized(sched, entity))
 		return;
@@ -198,22 +205,22 @@
 		container_of(cb, struct amd_sched_entity, cb);
 	entity->dependency = NULL;
 	fence_put(f);
-	amd_sched_wakeup(entity->scheduler);
+	amd_sched_wakeup(entity->sched);
 }
 
 static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 {
-	struct amd_gpu_scheduler *sched = entity->scheduler;
-	struct amd_sched_job *job;
+	struct amd_gpu_scheduler *sched = entity->sched;
+	struct amd_sched_job *sched_job;
 
 	if (ACCESS_ONCE(entity->dependency))
 		return NULL;
 
-	if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
+	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
 		return NULL;
 
-	while ((entity->dependency = sched->ops->dependency(job))) {
+	while ((entity->dependency = sched->ops->dependency(sched_job))) {
 
 		if (fence_add_callback(entity->dependency, &entity->cb,
 				       amd_sched_entity_wakeup))
@@ -222,32 +229,33 @@
 			return NULL;
 	}
 
-	return job;
+	return sched_job;
 }
 
 /**
  * Helper to submit a job to the job queue
  *
- * @job		The pointer to job required to submit
+ * @sched_job		The pointer to job required to submit
  *
  * Returns true if we could submit the job.
  */
-static bool amd_sched_entity_in(struct amd_sched_job *job)
+static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 {
-	struct amd_sched_entity *entity = job->s_entity;
+	struct amd_sched_entity *entity = sched_job->s_entity;
 	bool added, first = false;
 
 	spin_lock(&entity->queue_lock);
-	added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
+	added = kfifo_in(&entity->job_queue, &sched_job,
+			sizeof(sched_job)) == sizeof(sched_job);
 
-	if (added && kfifo_len(&entity->job_queue) == sizeof(job))
+	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
 		first = true;
 
 	spin_unlock(&entity->queue_lock);
 
 	/* first job wakes up scheduler */
 	if (first)
-		amd_sched_wakeup(job->sched);
+		amd_sched_wakeup(sched_job->sched);
 
 	return added;
 }
@@ -255,7 +263,7 @@
 /**
  * Submit a job to the job queue
  *
- * @job		The pointer to job required to submit
+ * @sched_job		The pointer to job required to submit
  *
  * Returns 0 for success, negative error code otherwise.
  */
@@ -271,9 +279,9 @@
 	fence_get(&fence->base);
 	sched_job->s_fence = fence;
 
-	wait_event(entity->scheduler->job_scheduled,
+	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
-
+	trace_amd_sched_job(sched_job);
 	return 0;
 }
 
@@ -301,30 +309,28 @@
 static struct amd_sched_job *
 amd_sched_select_job(struct amd_gpu_scheduler *sched)
 {
-	struct amd_sched_job *job;
+	struct amd_sched_job *sched_job;
 
 	if (!amd_sched_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	job = amd_sched_rq_select_job(&sched->kernel_rq);
-	if (job == NULL)
-		job = amd_sched_rq_select_job(&sched->sched_rq);
+	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
+	if (sched_job == NULL)
+		sched_job = amd_sched_rq_select_job(&sched->sched_rq);
 
-	return job;
+	return sched_job;
 }
 
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 {
-	struct amd_sched_job *sched_job =
-		container_of(cb, struct amd_sched_job, cb);
-	struct amd_gpu_scheduler *sched;
+	struct amd_sched_fence *s_fence =
+		container_of(cb, struct amd_sched_fence, cb);
+	struct amd_gpu_scheduler *sched = s_fence->sched;
 
-	sched = sched_job->sched;
-	amd_sched_fence_signal(sched_job->s_fence);
 	atomic_dec(&sched->hw_rq_count);
-	fence_put(&sched_job->s_fence->base);
-	sched->ops->process_job(sched_job);
+	amd_sched_fence_signal(s_fence);
+	fence_put(&s_fence->base);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
@@ -338,87 +344,82 @@
 
 	while (!kthread_should_stop()) {
 		struct amd_sched_entity *entity;
-		struct amd_sched_job *job;
+		struct amd_sched_fence *s_fence;
+		struct amd_sched_job *sched_job;
 		struct fence *fence;
 
 		wait_event_interruptible(sched->wake_up_worker,
 			kthread_should_stop() ||
-			(job = amd_sched_select_job(sched)));
+			(sched_job = amd_sched_select_job(sched)));
 
-		if (!job)
+		if (!sched_job)
 			continue;
 
-		entity = job->s_entity;
+		entity = sched_job->s_entity;
+		s_fence = sched_job->s_fence;
 		atomic_inc(&sched->hw_rq_count);
-		fence = sched->ops->run_job(job);
+		fence = sched->ops->run_job(sched_job);
 		if (fence) {
-			r = fence_add_callback(fence, &job->cb,
+			r = fence_add_callback(fence, &s_fence->cb,
 					       amd_sched_process_job);
 			if (r == -ENOENT)
-				amd_sched_process_job(fence, &job->cb);
+				amd_sched_process_job(fence, &s_fence->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n", r);
 			fence_put(fence);
+		} else {
+			DRM_ERROR("Failed to run job!\n");
+			amd_sched_process_job(NULL, &s_fence->cb);
 		}
 
-		count = kfifo_out(&entity->job_queue, &job, sizeof(job));
-		WARN_ON(count != sizeof(job));
+		count = kfifo_out(&entity->job_queue, &sched_job,
+				sizeof(sched_job));
+		WARN_ON(count != sizeof(sched_job));
 		wake_up(&sched->job_scheduled);
 	}
 	return 0;
 }
 
 /**
- * Create a gpu scheduler
+ * Init a gpu scheduler instance
  *
+ * @sched		The pointer to the scheduler
  * @ops			The backend operations for this scheduler.
- * @ring		The the ring id for the scheduler.
  * @hw_submissions	Number of hw submissions to do.
+ * @name		Name used for debugging
  *
- * Return the pointer to scheduler for success, otherwise return NULL
+ * Return 0 on success, otherwise error code.
 */
-struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
-					   unsigned ring, unsigned hw_submission,
-					   void *priv)
+int amd_sched_init(struct amd_gpu_scheduler *sched,
+		   struct amd_sched_backend_ops *ops,
+		   unsigned hw_submission, const char *name)
 {
-	struct amd_gpu_scheduler *sched;
-
-	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
-	if (!sched)
-		return NULL;
-
 	sched->ops = ops;
-	sched->ring_id = ring;
 	sched->hw_submission_limit = hw_submission;
-	sched->priv = priv;
-	snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
+	sched->name = name;
 	amd_sched_rq_init(&sched->sched_rq);
 	amd_sched_rq_init(&sched->kernel_rq);
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
 	atomic_set(&sched->hw_rq_count, 0);
+
 	/* Each scheduler will run on a separate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
 	if (IS_ERR(sched->thread)) {
-		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
-		kfree(sched);
-		return NULL;
+		DRM_ERROR("Failed to create scheduler for %s.\n", name);
+		return PTR_ERR(sched->thread);
 	}
 
-	return sched;
+	return 0;
 }
 
 /**
  * Destroy a gpu scheduler
  *
  * @sched	The pointer to the scheduler
- *
- * return 0 if succeed. -1 if failed.
  */
-int amd_sched_destroy(struct amd_gpu_scheduler *sched)
+void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
 	kthread_stop(sched->thread);
-	kfree(sched);
-	return  0;
 }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 2af0e4d..80b64dc 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -38,13 +38,15 @@
 */
 struct amd_sched_entity {
 	struct list_head		list;
-	struct amd_sched_rq		*belongto_rq;
-	atomic_t			fence_seq;
-	/* the job_queue maintains the jobs submitted by clients */
-	struct kfifo                    job_queue;
+	struct amd_sched_rq		*rq;
+	struct amd_gpu_scheduler	*sched;
+
 	spinlock_t			queue_lock;
-	struct amd_gpu_scheduler	*scheduler;
+	struct kfifo                    job_queue;
+
+	atomic_t			fence_seq;
 	uint64_t                        fence_context;
+
 	struct fence			*dependency;
 	struct fence_cb			cb;
 };
@@ -62,13 +64,13 @@
 
 struct amd_sched_fence {
 	struct fence                    base;
-	struct amd_gpu_scheduler	*scheduler;
+	struct fence_cb                 cb;
+	struct amd_gpu_scheduler	*sched;
 	spinlock_t			lock;
 	void                            *owner;
 };
 
 struct amd_sched_job {
-	struct fence_cb                 cb;
 	struct amd_gpu_scheduler        *sched;
 	struct amd_sched_entity         *s_entity;
 	struct amd_sched_fence          *s_fence;
@@ -91,32 +93,29 @@
  * these functions should be implemented in driver side
 */
 struct amd_sched_backend_ops {
-	struct fence *(*dependency)(struct amd_sched_job *job);
-	struct fence *(*run_job)(struct amd_sched_job *job);
-	void (*process_job)(struct amd_sched_job *job);
+	struct fence *(*dependency)(struct amd_sched_job *sched_job);
+	struct fence *(*run_job)(struct amd_sched_job *sched_job);
 };
 
 /**
  * One scheduler is implemented for each hardware ring
 */
 struct amd_gpu_scheduler {
-	struct task_struct		*thread;
+	struct amd_sched_backend_ops	*ops;
+	uint32_t			hw_submission_limit;
+	const char			*name;
 	struct amd_sched_rq		sched_rq;
 	struct amd_sched_rq		kernel_rq;
-	atomic_t			hw_rq_count;
-	struct amd_sched_backend_ops	*ops;
-	uint32_t			ring_id;
 	wait_queue_head_t		wake_up_worker;
 	wait_queue_head_t		job_scheduled;
-	uint32_t                        hw_submission_limit;
-	char                            name[20];
-	void                            *priv;
+	atomic_t			hw_rq_count;
+	struct task_struct		*thread;
 };
 
-struct amd_gpu_scheduler *
-amd_sched_create(struct amd_sched_backend_ops *ops,
-		 uint32_t ring, uint32_t hw_submission, void *priv);
-int amd_sched_destroy(struct amd_gpu_scheduler *sched);
+int amd_sched_init(struct amd_gpu_scheduler *sched,
+		   struct amd_sched_backend_ops *ops,
+		   uint32_t hw_submission, const char *name);
+void amd_sched_fini(struct amd_gpu_scheduler *sched);
 
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_entity *entity,
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index e62c379..d802638 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -36,7 +36,7 @@
 	if (fence == NULL)
 		return NULL;
 	fence->owner = owner;
-	fence->scheduler = s_entity->scheduler;
+	fence->sched = s_entity->sched;
 	spin_lock_init(&fence->lock);
 
 	seq = atomic_inc_return(&s_entity->fence_seq);
@@ -63,7 +63,7 @@
 static const char *amd_sched_fence_get_timeline_name(struct fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
-	return (const char *)fence->scheduler->name;
+	return (const char *)fence->sched->name;
 }
 
 static bool amd_sched_fence_enable_signaling(struct fence *f)
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 9a860ca..d93e737 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -520,7 +520,8 @@
 
 /** Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index df0b61a..bd1a415 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -77,6 +77,7 @@
 config DRM_EXYNOS_G2D
 	bool "Exynos DRM G2D"
 	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
+	select FRAME_VECTOR
 	help
 	  Choose this option if you want to use Exynos G2D for DRM.
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 535b4ad..3734c34 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -194,10 +194,8 @@
 	dma_addr_t		dma_addr;
 	unsigned long		userptr;
 	unsigned long		size;
-	struct page		**pages;
-	unsigned int		npages;
+	struct frame_vector	*vec;
 	struct sg_table		*sgt;
-	struct vm_area_struct	*vma;
 	atomic_t		refcount;
 	bool			in_pool;
 	bool			out_of_list;
@@ -367,6 +365,7 @@
 {
 	struct g2d_cmdlist_userptr *g2d_userptr =
 					(struct g2d_cmdlist_userptr *)obj;
+	struct page **pages;
 
 	if (!obj)
 		return;
@@ -386,19 +385,21 @@
 	exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
 					DMA_BIDIRECTIONAL);
 
-	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
-					g2d_userptr->npages,
-					g2d_userptr->vma);
+	pages = frame_vector_pages(g2d_userptr->vec);
+	if (!IS_ERR(pages)) {
+		int i;
 
-	exynos_gem_put_vma(g2d_userptr->vma);
+		for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
+			set_page_dirty_lock(pages[i]);
+	}
+	put_vaddr_frames(g2d_userptr->vec);
+	frame_vector_destroy(g2d_userptr->vec);
 
 	if (!g2d_userptr->out_of_list)
 		list_del_init(&g2d_userptr->list);
 
 	sg_free_table(g2d_userptr->sgt);
 	kfree(g2d_userptr->sgt);
-
-	drm_free_large(g2d_userptr->pages);
 	kfree(g2d_userptr);
 }
 
@@ -412,9 +413,7 @@
 	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
 	struct g2d_cmdlist_userptr *g2d_userptr;
 	struct g2d_data *g2d;
-	struct page **pages;
 	struct sg_table	*sgt;
-	struct vm_area_struct *vma;
 	unsigned long start, end;
 	unsigned int npages, offset;
 	int ret;
@@ -460,65 +459,40 @@
 		return ERR_PTR(-ENOMEM);
 
 	atomic_set(&g2d_userptr->refcount, 1);
+	g2d_userptr->size = size;
 
 	start = userptr & PAGE_MASK;
 	offset = userptr & ~PAGE_MASK;
 	end = PAGE_ALIGN(userptr + size);
 	npages = (end - start) >> PAGE_SHIFT;
-	g2d_userptr->npages = npages;
-
-	pages = drm_calloc_large(npages, sizeof(struct page *));
-	if (!pages) {
-		DRM_ERROR("failed to allocate pages.\n");
+	g2d_userptr->vec = frame_vector_create(npages);
+	if (!g2d_userptr->vec) {
 		ret = -ENOMEM;
 		goto err_free;
 	}
 
-	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm, userptr);
-	if (!vma) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("failed to get vm region.\n");
-		ret = -EFAULT;
-		goto err_free_pages;
-	}
-
-	if (vma->vm_end < userptr + size) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("vma is too small.\n");
-		ret = -EFAULT;
-		goto err_free_pages;
-	}
-
-	g2d_userptr->vma = exynos_gem_get_vma(vma);
-	if (!g2d_userptr->vma) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("failed to copy vma.\n");
-		ret = -ENOMEM;
-		goto err_free_pages;
-	}
-
-	g2d_userptr->size = size;
-
-	ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
-						npages, pages, vma);
-	if (ret < 0) {
-		up_read(&current->mm->mmap_sem);
+	ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+	if (ret != npages) {
 		DRM_ERROR("failed to get user pages from userptr.\n");
-		goto err_put_vma;
+		if (ret < 0)
+			goto err_destroy_framevec;
+		ret = -EFAULT;
+		goto err_put_framevec;
 	}
-
-	up_read(&current->mm->mmap_sem);
-	g2d_userptr->pages = pages;
+	if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
+		ret = -EFAULT;
+		goto err_put_framevec;
+	}
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt) {
 		ret = -ENOMEM;
-		goto err_free_userptr;
+		goto err_put_framevec;
 	}
 
-	ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
-					size, GFP_KERNEL);
+	ret = sg_alloc_table_from_pages(sgt,
+					frame_vector_pages(g2d_userptr->vec),
+					npages, offset, size, GFP_KERNEL);
 	if (ret < 0) {
 		DRM_ERROR("failed to get sgt from pages.\n");
 		goto err_free_sgt;
@@ -553,16 +527,11 @@
 err_free_sgt:
 	kfree(sgt);
 
-err_free_userptr:
-	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
-					g2d_userptr->npages,
-					g2d_userptr->vma);
+err_put_framevec:
+	put_vaddr_frames(g2d_userptr->vec);
 
-err_put_vma:
-	exynos_gem_put_vma(g2d_userptr->vma);
-
-err_free_pages:
-	drm_free_large(pages);
+err_destroy_framevec:
+	frame_vector_destroy(g2d_userptr->vec);
 
 err_free:
 	kfree(g2d_userptr);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 62b9ea1..f12fbc3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -366,103 +366,6 @@
 	return 0;
 }
 
-struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
-{
-	struct vm_area_struct *vma_copy;
-
-	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
-	if (!vma_copy)
-		return NULL;
-
-	if (vma->vm_ops && vma->vm_ops->open)
-		vma->vm_ops->open(vma);
-
-	if (vma->vm_file)
-		get_file(vma->vm_file);
-
-	memcpy(vma_copy, vma, sizeof(*vma));
-
-	vma_copy->vm_mm = NULL;
-	vma_copy->vm_next = NULL;
-	vma_copy->vm_prev = NULL;
-
-	return vma_copy;
-}
-
-void exynos_gem_put_vma(struct vm_area_struct *vma)
-{
-	if (!vma)
-		return;
-
-	if (vma->vm_ops && vma->vm_ops->close)
-		vma->vm_ops->close(vma);
-
-	if (vma->vm_file)
-		fput(vma->vm_file);
-
-	kfree(vma);
-}
-
-int exynos_gem_get_pages_from_userptr(unsigned long start,
-						unsigned int npages,
-						struct page **pages,
-						struct vm_area_struct *vma)
-{
-	int get_npages;
-
-	/* the memory region mmaped with VM_PFNMAP. */
-	if (vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
-			unsigned long pfn;
-			int ret = follow_pfn(vma, start, &pfn);
-			if (ret)
-				return ret;
-
-			pages[i] = pfn_to_page(pfn);
-		}
-
-		if (i != npages) {
-			DRM_ERROR("failed to get user_pages.\n");
-			return -EINVAL;
-		}
-
-		return 0;
-	}
-
-	get_npages = get_user_pages(current, current->mm, start,
-					npages, 1, 1, pages, NULL);
-	get_npages = max(get_npages, 0);
-	if (get_npages != npages) {
-		DRM_ERROR("failed to get user_pages.\n");
-		while (get_npages)
-			put_page(pages[--get_npages]);
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
-void exynos_gem_put_pages_to_userptr(struct page **pages,
-					unsigned int npages,
-					struct vm_area_struct *vma)
-{
-	if (!vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < npages; i++) {
-			set_page_dirty_lock(pages[i]);
-
-			/*
-			 * undo the reference we took when populating
-			 * the table.
-			 */
-			put_page(pages[i]);
-		}
-	}
-}
-
 int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
 				struct sg_table *sgt,
 				enum dma_data_direction dir)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 82be6b8..d1e300d 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -58,7 +58,8 @@
 					     struct drm_plane_state *old_state)
 {
 	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
-	unsigned int index, value, ret;
+	unsigned int value;
+	int index, ret;
 
 	index = fsl_dcu_drm_plane_index(plane);
 	if (index < 0)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5a244ab..39d73db 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -640,6 +640,32 @@
 		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
 	/*
+	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
+	 * read it just before the start of vblank.  So try it again
+	 * so we don't accidentally end up spanning a vblank frame
+	 * increment, causing the pipe_update_end() code to squawk at us.
+	 *
+	 * The nature of this problem means we can't simply check the ISR
+	 * bit and return the vblank start value; nor can we use the scanline
+	 * debug register in the transcoder as it appears to have the same
+	 * problem.  We may need to extend this to include other platforms,
+	 * but so far testing only shows the problem on HSW.
+	 */
+	if (IS_HASWELL(dev) && !position) {
+		int i, temp;
+
+		for (i = 0; i < 100; i++) {
+			udelay(1);
+			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
+				DSL_LINEMASK_GEN3;
+			if (temp != position) {
+				position = temp;
+				break;
+			}
+		}
+	}
+
+	/*
 	 * See update_scanline_offset() for the details on the
 	 * scanline_offset adjustment.
 	 */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 89c1a8ce..2a5c76f 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -430,7 +430,7 @@
 
 /**
  * intel_audio_codec_disable - Disable the audio codec for HD audio
- * @encoder: encoder on which to disable audio
+ * @intel_encoder: encoder on which to disable audio
  *
  * The disable sequences must be performed before disabling the transcoder or
  * port.
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b3e437b..c19e669 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -42,7 +42,7 @@
 	const struct bdb_header *bdb = _bdb;
 	const u8 *base = _bdb;
 	int index = 0;
-	u16 total, current_size;
+	u32 total, current_size;
 	u8 current_id;
 
 	/* skip to first section */
@@ -57,6 +57,10 @@
 		current_size = *((const u16 *)(base + index));
 		index += 2;
 
+		/* The MIPI Sequence Block v3+ has a separate size field. */
+		if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
+			current_size = *((const u32 *)(base + index + 1));
+
 		if (index + current_size > total)
 			return NULL;
 
@@ -799,6 +803,12 @@
 		return;
 	}
 
+	/* Fail gracefully for forward incompatible sequence block. */
+	if (sequence->version >= 3) {
+		DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
+		return;
+	}
+
 	DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
 
 	block_size = get_blocksize(sequence);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8cc9264..cf418be 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15087,9 +15087,12 @@
 
 		plane_state = to_intel_plane_state(p->base.state);
 
-		if (p->base.type == DRM_PLANE_TYPE_PRIMARY)
+		if (p->base.type == DRM_PLANE_TYPE_PRIMARY) {
 			plane_state->visible = primary_get_hw_state(crtc);
-		else {
+			if (plane_state->visible)
+				crtc->base.state->plane_mask |=
+					1 << drm_plane_index(&p->base);
+		} else {
 			if (active)
 				p->disable_plane(&p->base, &crtc->base);
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 87de15e..b35b5b2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -186,17 +186,19 @@
 
 	sysram = vmalloc(size);
 	if (!sysram)
-		return -ENOMEM;
+		goto err_sysram;
 
 	info = drm_fb_helper_alloc_fbi(helper);
-	if (IS_ERR(info))
-		return PTR_ERR(info);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
+		goto err_alloc_fbi;
+	}
 
 	info->par = mfbdev;
 
 	ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
 	if (ret)
-		return ret;
+		goto err_framebuffer_init;
 
 	mfbdev->sysram = sysram;
 	mfbdev->size = size;
@@ -225,7 +227,17 @@
 
 	DRM_DEBUG_KMS("allocated %dx%d\n",
 		      fb->width, fb->height);
+
 	return 0;
+
+err_framebuffer_init:
+	drm_fb_helper_release_fbi(helper);
+err_alloc_fbi:
+	vfree(sysram);
+err_sysram:
+	drm_gem_object_unreference_unlocked(gobj);
+
+	return ret;
 }
 
 static int mga_fbdev_destroy(struct drm_device *dev,
@@ -276,23 +288,26 @@
 	ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
 				 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
 	if (ret)
-		return ret;
+		goto err_fb_helper;
 
 	ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
 	if (ret)
-		goto fini;
+		goto err_fb_setup;
 
 	/* disable all the possible outputs/crtcs before entering KMS mode */
 	drm_helper_disable_unused_functions(mdev->dev);
 
 	ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
 	if (ret)
-		goto fini;
+		goto err_fb_setup;
 
 	return 0;
 
-fini:
+err_fb_setup:
 	drm_fb_helper_fini(&mfbdev->helper);
+err_fb_helper:
+	mdev->mfbdev = NULL;
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index de06388..b1a0f56 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -220,7 +220,7 @@
 	}
 	r = mgag200_mm_init(mdev);
 	if (r)
-		goto out;
+		goto err_mm;
 
 	drm_mode_config_init(dev);
 	dev->mode_config.funcs = (void *)&mga_mode_funcs;
@@ -233,7 +233,7 @@
 	r = mgag200_modeset_init(mdev);
 	if (r) {
 		dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
-		goto out;
+		goto err_modeset;
 	}
 
 	/* Make small buffers to store a hardware cursor (double buffered icon updates) */
@@ -241,20 +241,24 @@
 					  &mdev->cursor.pixels_1);
 	mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
 					  &mdev->cursor.pixels_2);
-	if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1)
-		goto cursor_nospace;
-	mdev->cursor.pixels_current = mdev->cursor.pixels_1;
-	mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
-	goto cursor_done;
- cursor_nospace:
-	mdev->cursor.pixels_1 = NULL;
-	mdev->cursor.pixels_2 = NULL;
-	dev_warn(&dev->pdev->dev, "Could not allocate space for cursors. Not doing hardware cursors.\n");
- cursor_done:
+	if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) {
+		mdev->cursor.pixels_1 = NULL;
+		mdev->cursor.pixels_2 = NULL;
+		dev_warn(&dev->pdev->dev,
+			"Could not allocate space for cursors. Not doing hardware cursors.\n");
+	} else {
+		mdev->cursor.pixels_current = mdev->cursor.pixels_1;
+		mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
+	}
 
-out:
-	if (r)
-		mgag200_driver_unload(dev);
+	return 0;
+
+err_modeset:
+	drm_mode_config_cleanup(dev);
+	mgag200_mm_fini(mdev);
+err_mm:
+	dev->dev_private = NULL;
+
 	return r;
 }
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index b1f73be..b0d4b53 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -178,7 +178,6 @@
 
 	irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
 	irq_set_chip_data(irq, mdp5_kms);
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 7c6225c..dd845f8 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -886,13 +886,15 @@
 		drm_connector_to_qxl_output(connector);
 	struct drm_device *ddev = connector->dev;
 	struct qxl_device *qdev = ddev->dev_private;
-	int connected;
+	bool connected = false;
 
 	/* The first monitor is always connected */
-	connected = (output->index == 0) ||
-		    (qdev->client_monitors_config &&
-		     qdev->client_monitors_config->count > output->index &&
-		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
+	if (!qdev->client_monitors_config) {
+		if (output->index == 0)
+			connected = true;
+	} else
+		connected = qdev->client_monitors_config->count > output->index &&
+		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
 
 	DRM_DEBUG("#%d connected: %d\n", output->index, connected);
 	if (!connected)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index d8319da..f3f562f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1573,10 +1573,12 @@
 
 	drm_kms_helper_poll_disable(dev);
 
+	drm_modeset_lock_all(dev);
 	/* turn off display hw */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 	}
+	drm_modeset_unlock_all(dev);
 
 	/* unpin the front buffers and cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1734,9 +1736,11 @@
 	if (fbcon) {
 		drm_helper_resume_force_mode(dev);
 		/* turn on display hw */
+		drm_modeset_lock_all(dev);
 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 		}
+		drm_modeset_unlock_all(dev);
 	}
 
 	drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 787cd8f..e9115d3 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2927,6 +2927,7 @@
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
 	{ 0, 0, 0, 0 },
 };
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d9b7de..745e996 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -882,6 +882,8 @@
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
+		if (!man->has_type || !man->use_type)
+			continue;
 
 		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
 						&cur_flags);
@@ -889,6 +891,7 @@
 		if (!type_ok)
 			continue;
 
+		type_found = true;
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
 		/*
@@ -901,12 +904,10 @@
 		if (mem_type == TTM_PL_SYSTEM)
 			break;
 
-		if (man->has_type && man->use_type) {
-			type_found = true;
-			ret = (*man->func->get_node)(man, bo, place, mem);
-			if (unlikely(ret))
-				return ret;
-		}
+		ret = (*man->func->get_node)(man, bo, place, mem);
+		if (unlikely(ret))
+			return ret;
+
 		if (mem->mm_node)
 			break;
 	}
@@ -917,9 +918,6 @@
 		return 0;
 	}
 
-	if (!type_found)
-		return -EINVAL;
-
 	for (i = 0; i < placement->num_busy_placement; ++i) {
 		const struct ttm_place *place = &placement->busy_placement[i];
 
@@ -927,11 +925,12 @@
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
-		if (!man->has_type)
+		if (!man->has_type || !man->use_type)
 			continue;
 		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
 			continue;
 
+		type_found = true;
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
 		/*
@@ -957,8 +956,13 @@
 		if (ret == -ERESTARTSYS)
 			has_erestartsys = true;
 	}
-	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
-	return ret;
+
+	if (!type_found) {
+		printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
+		return -EINVAL;
+	}
+
+	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 67720f7..b49445d 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,6 +1,6 @@
 config DRM_VMWGFX
 	tristate "DRM driver for VMware Virtual GPU"
-	depends on DRM && PCI
+	depends on DRM && PCI && X86
 	select FB_DEFERRED_IO
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index ce659a1..092ea81 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -311,7 +311,6 @@
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct ttm_buffer_object *bo = val_buf->bo;
 	struct vmw_fence_obj *fence;
-	int ret;
 
 	if (list_empty(&res->mob_head))
 		return 0;
@@ -328,7 +327,7 @@
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
 
-	return ret;
+	return 0;
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e13b20b..2c7a25c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -752,12 +752,8 @@
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;
 
-
-	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
-					       dev_priv->mmio_size);
-
-	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
-					 dev_priv->mmio_size);
+	dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
+					    dev_priv->mmio_size);
 
 	if (unlikely(dev_priv->mmio_virt == NULL)) {
 		ret = -ENOMEM;
@@ -913,7 +909,6 @@
 out_err4:
 	iounmap(dev_priv->mmio_virt);
 out_err3:
-	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	vmw_ttm_global_release(dev_priv);
 out_err0:
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -964,7 +959,6 @@
 
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
-	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	if (dev_priv->ctx.staged_bindings)
 		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	vmw_ttm_global_release(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 6d02de6..f19fd39 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -376,7 +376,6 @@
 	uint32_t initial_width;
 	uint32_t initial_height;
 	u32 __iomem *mmio_virt;
-	int mmio_mtrr;
 	uint32_t capabilities;
 	uint32_t max_gmr_ids;
 	uint32_t max_gmr_pages;
@@ -631,7 +630,8 @@
 				 uint32_t size,
 				 bool shareable,
 				 uint32_t *handle,
-				 struct vmw_dma_buffer **p_dma_buf);
+				 struct vmw_dma_buffer **p_dma_buf,
+				 struct ttm_base_object **p_base);
 extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 				     struct vmw_dma_buffer *dma_buf,
 				     uint32_t *handle);
@@ -645,7 +645,8 @@
 					 uint32_t cur_validate_node);
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-				  uint32_t id, struct vmw_dma_buffer **out);
+				  uint32_t id, struct vmw_dma_buffer **out,
+				  struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index b565654..5da5de0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1236,7 +1236,8 @@
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+				     NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		ret = -EINVAL;
@@ -1296,7 +1297,8 @@
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+				     NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		ret = -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 61fb7f3..15a6c01 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1685,7 +1685,6 @@
 	struct drm_crtc *crtc;
 	u32 num_units = 0;
 	u32 i, k;
-	int ret;
 
 	dirty->dev_priv = dev_priv;
 
@@ -1711,7 +1710,7 @@
 			if (!dirty->cmd) {
 				DRM_ERROR("Couldn't reserve fifo space "
 					  "for dirty blits.\n");
-				return ret;
+				return -ENOMEM;
 			}
 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
 		}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 76069f0..222c9c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -484,7 +484,7 @@
 		goto out_unlock;
 	}
 
-	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
+	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
 	if (ret)
 		goto out_unlock;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c1912f8..e57667c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -354,7 +354,7 @@
 	}
 
 	*out_surf = NULL;
-	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
 	return ret;
 }
 
@@ -481,7 +481,8 @@
 			  uint32_t size,
 			  bool shareable,
 			  uint32_t *handle,
-			  struct vmw_dma_buffer **p_dma_buf)
+			  struct vmw_dma_buffer **p_dma_buf,
+			  struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *user_bo;
 	struct ttm_buffer_object *tmp;
@@ -515,6 +516,10 @@
 	}
 
 	*p_dma_buf = &user_bo->dma;
+	if (p_base) {
+		*p_base = &user_bo->prime.base;
+		kref_get(&(*p_base)->refcount);
+	}
 	*handle = user_bo->prime.base.hash.key;
 
 out_no_base_object:
@@ -631,6 +636,7 @@
 	struct vmw_dma_buffer *dma_buf;
 	struct vmw_user_dma_buffer *user_bo;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct ttm_base_object *buffer_base;
 	int ret;
 
 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -643,7 +649,8 @@
 
 	switch (arg->op) {
 	case drm_vmw_synccpu_grab:
-		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
+					     &buffer_base);
 		if (unlikely(ret != 0))
 			return ret;
 
@@ -651,6 +658,7 @@
 				       dma);
 		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
 		vmw_dmabuf_unreference(&dma_buf);
+		ttm_base_object_unref(&buffer_base);
 		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 			     ret != -EBUSY)) {
 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -692,7 +700,8 @@
 		return ret;
 
 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-				    req->size, false, &handle, &dma_buf);
+				    req->size, false, &handle, &dma_buf,
+				    NULL);
 	if (unlikely(ret != 0))
 		goto out_no_dmabuf;
 
@@ -721,7 +730,8 @@
 }
 
 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-			   uint32_t handle, struct vmw_dma_buffer **out)
+			   uint32_t handle, struct vmw_dma_buffer **out,
+			   struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *vmw_user_bo;
 	struct ttm_base_object *base;
@@ -743,7 +753,10 @@
 	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 				   prime.base);
 	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
-	ttm_base_object_unref(&base);
+	if (p_base)
+		*p_base = base;
+	else
+		ttm_base_object_unref(&base);
 	*out = &vmw_user_bo->dma;
 
 	return 0;
@@ -1004,7 +1017,7 @@
 
 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 				    args->size, false, &args->handle,
-				    &dma_buf);
+				    &dma_buf, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_dmabuf;
 
@@ -1032,7 +1045,7 @@
 	struct vmw_dma_buffer *out_buf;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
 	if (ret != 0)
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index bba1ee3..fd47547 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -855,7 +855,7 @@
 
 	if (buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
-					     &buffer);
+					     &buffer, NULL);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Could not find buffer for shader "
 				  "creation.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3361769..64b5040 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -46,6 +46,7 @@
 	struct vmw_surface srf;
 	uint32_t size;
 	struct drm_master *master;
+	struct ttm_base_object *backup_base;
 };
 
 /**
@@ -656,6 +657,7 @@
 	struct vmw_resource *res = &user_srf->srf.res;
 
 	*p_base = NULL;
+	ttm_base_object_unref(&user_srf->backup_base);
 	vmw_resource_unreference(&res);
 }
 
@@ -851,7 +853,8 @@
 					    res->backup_size,
 					    true,
 					    &backup_handle,
-					    &res->backup);
+					    &res->backup,
+					    &user_srf->backup_base);
 		if (unlikely(ret != 0)) {
 			vmw_resource_unreference(&res);
 			goto out_unlock;
@@ -1321,7 +1324,8 @@
 
 	if (req->buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
-					     &res->backup);
+					     &res->backup,
+					     &user_srf->backup_base);
 		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
 		    res->backup_size) {
 			DRM_ERROR("Surface backup buffer is too small.\n");
@@ -1335,7 +1339,8 @@
 					    req->drm_surface_flags &
 					    drm_vmw_surface_flag_shareable,
 					    &backup_handle,
-					    &res->backup);
+					    &res->backup,
+					    &user_srf->backup_base);
 
 	if (unlikely(ret != 0)) {
 		vmw_resource_unreference(&res);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 243f99a..e5a38d2 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -912,7 +912,7 @@
 	}
 }
 
-static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ipu_irq_handler(struct irq_desc *desc)
 {
 	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -925,7 +925,7 @@
 	chained_irq_exit(chip, desc);
 }
 
-static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ipu_err_irq_handler(struct irq_desc *desc)
 {
 	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1099,8 +1099,7 @@
 	}
 
 	ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
-					     handle_level_irq, 0,
-					     IRQF_VALID, 0);
+					     handle_level_irq, 0, 0, 0);
 	if (ret < 0) {
 		dev_err(ipu->dev, "failed to alloc generic irq chips\n");
 		irq_domain_remove(ipu->domain);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2f9aead..652afd1 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -204,6 +204,8 @@
 		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
 		list_del(&channel->listentry);
 		spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+
+		primary_channel = channel;
 	} else {
 		primary_channel = channel->primary_channel;
 		spin_lock_irqsave(&primary_channel->lock, flags);
@@ -211,6 +213,14 @@
 		primary_channel->num_sc--;
 		spin_unlock_irqrestore(&primary_channel->lock, flags);
 	}
+
+	/*
+	 * We need to free the bit for init_vp_index() to work in the case
+	 * of a sub-channel, when we reload drivers like hv_netvsc.
+	 */
+	cpumask_clear_cpu(channel->target_cpu,
+			  &primary_channel->alloced_cpus_in_node);
+
 	free_channel(channel);
 }
 
@@ -458,6 +468,13 @@
 			continue;
 		}
 
+		/*
+		 * NOTE: in the case of a sub-channel, we clear the sub-channel
+		 * related bit(s) in primary->alloced_cpus_in_node in
+		 * hv_process_channel_removal(), so when we reload drivers
+		 * like hv_netvsc in an SMP guest, we're able to re-allocate
+		 * the bit from primary->alloced_cpus_in_node here.
+		 */
 		if (!cpumask_test_cpu(cur_cpu,
 				&primary->alloced_cpus_in_node)) {
 			cpumask_set_cpu(cur_cpu,
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 500b262..e13c902 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1140,8 +1140,8 @@
 	help
 	  If you say yes here you get support for the hardware monitoring
 	  functionality of the Nuvoton NCT6106D, NCT6775F, NCT6776F, NCT6779D,
-	  NCT6791D, NCT6792D and compatible Super-I/O chips. This driver
-	  replaces the w83627ehf driver for NCT6775F and NCT6776F.
+	  NCT6791D, NCT6792D, NCT6793D, and compatible Super-I/O chips. This
+	  driver replaces the w83627ehf driver for NCT6775F and NCT6776F.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called nct6775.
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index 6cb89c0..1fd4685 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -470,6 +470,7 @@
 	{ .compatible = "stericsson,abx500-temp" },
 	{},
 };
+MODULE_DEVICE_TABLE(of, abx500_temp_match);
 #endif
 
 static struct platform_driver abx500_temp_driver = {
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index a3dae6d..82de3de 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -539,6 +539,7 @@
 	{ .compatible = "gpio-fan", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
 #endif /* CONFIG_OF_GPIO */
 
 static int gpio_fan_probe(struct platform_device *pdev)
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index fe41d5a..e4e57bb 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -104,7 +104,7 @@
 
 /* sysfs attributes for hwmon */
 
-static int lm75_read_temp(void *dev, long *temp)
+static int lm75_read_temp(void *dev, int *temp)
 {
 	struct lm75_data *data = lm75_update_device(dev);
 
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index bd1c99d..8b4fa55 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -39,6 +39,7 @@
  * nct6779d    15      5       5       2+6    0xc560 0xc1    0x5ca3
  * nct6791d    15      6       6       2+6    0xc800 0xc1    0x5ca3
  * nct6792d    15      6       6       2+6    0xc910 0xc1    0x5ca3
+ * nct6793d    15      6       6       2+6    0xd120 0xc1    0x5ca3
  *
  * #temp lists the number of monitored temperature sources (first value) plus
  * the number of directly connectable temperature sensors (second value).
@@ -63,7 +64,7 @@
 
 #define USE_ALTERNATE
 
-enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792 };
+enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793 };
 
 /* used to set data->name = nct6775_device_names[data->sio_kind] */
 static const char * const nct6775_device_names[] = {
@@ -73,6 +74,17 @@
 	"nct6779",
 	"nct6791",
 	"nct6792",
+	"nct6793",
+};
+
+static const char * const nct6775_sio_names[] __initconst = {
+	"NCT6106D",
+	"NCT6775F",
+	"NCT6776D/F",
+	"NCT6779D",
+	"NCT6791D",
+	"NCT6792D",
+	"NCT6793D",
 };
 
 static unsigned short force_id;
@@ -104,6 +116,7 @@
 #define SIO_NCT6779_ID		0xc560
 #define SIO_NCT6791_ID		0xc800
 #define SIO_NCT6792_ID		0xc910
+#define SIO_NCT6793_ID		0xd120
 #define SIO_ID_MASK		0xFFF0
 
 enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
@@ -354,6 +367,10 @@
 
 /* NCT6776 specific data */
 
+/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
+#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME
+#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME
+
 static const s8 NCT6776_ALARM_BITS[] = {
 	0, 1, 2, 3, 8, 21, 20, 16,	/* in0.. in7 */
 	17, -1, -1, -1, -1, -1, -1,	/* in8..in14 */
@@ -533,7 +550,7 @@
 	4, 5, 13, -1, -1, -1,		/* temp1..temp6 */
 	12, 9 };			/* intrusion0, intrusion1 */
 
-/* NCT6792 specific data */
+/* NCT6792/NCT6793 specific data */
 
 static const u16 NCT6792_REG_TEMP_MON[] = {
 	0x73, 0x75, 0x77, 0x79, 0x7b, 0x7d };
@@ -1056,6 +1073,7 @@
 	case nct6779:
 	case nct6791:
 	case nct6792:
+	case nct6793:
 		return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
 		  ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) ||
 		  reg == 0x402 ||
@@ -1407,6 +1425,7 @@
 		case nct6779:
 		case nct6791:
 		case nct6792:
+		case nct6793:
 			reg = nct6775_read_value(data,
 					data->REG_CRITICAL_PWM_ENABLE[i]);
 			if (reg & data->CRITICAL_PWM_ENABLE_MASK)
@@ -2822,6 +2841,7 @@
 		case nct6779:
 		case nct6791:
 		case nct6792:
+		case nct6793:
 			nct6775_write_value(data, data->REG_CRITICAL_PWM[nr],
 					    val);
 			reg = nct6775_read_value(data,
@@ -3256,7 +3276,7 @@
 		pwm4pin = false;
 		pwm5pin = false;
 		pwm6pin = false;
-	} else {	/* NCT6779D, NCT6791D, or NCT6792D */
+	} else {	/* NCT6779D, NCT6791D, NCT6792D, or NCT6793D */
 		regval = superio_inb(sioreg, 0x1c);
 
 		fan3pin = !(regval & (1 << 5));
@@ -3269,7 +3289,8 @@
 
 		fan4min = fan4pin;
 
-		if (data->kind == nct6791 || data->kind == nct6792) {
+		if (data->kind == nct6791 || data->kind == nct6792 ||
+		    data->kind == nct6793) {
 			regval = superio_inb(sioreg, 0x2d);
 			fan6pin = (regval & (1 << 1));
 			pwm6pin = (regval & (1 << 0));
@@ -3528,8 +3549,8 @@
 		data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
 		data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
 		data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
-		data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
-		data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+		data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+		data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
 		data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
 		data->REG_PWM[0] = NCT6775_REG_PWM;
 		data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3600,8 +3621,8 @@
 		data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
 		data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
 		data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
-		data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
-		data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+		data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+		data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
 		data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
 		data->REG_PWM[0] = NCT6775_REG_PWM;
 		data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3643,6 +3664,7 @@
 		break;
 	case nct6791:
 	case nct6792:
+	case nct6793:
 		data->in_num = 15;
 		data->pwm_num = 6;
 		data->auto_pwm_num = 4;
@@ -3677,8 +3699,8 @@
 		data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
 		data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
 		data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
-		data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
-		data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+		data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+		data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
 		data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
 		data->REG_PWM[0] = NCT6775_REG_PWM;
 		data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3918,6 +3940,7 @@
 	case nct6779:
 	case nct6791:
 	case nct6792:
+	case nct6793:
 		break;
 	}
 
@@ -3950,6 +3973,7 @@
 			break;
 		case nct6791:
 		case nct6792:
+		case nct6793:
 			tmp |= 0x7e;
 			break;
 		}
@@ -4047,7 +4071,8 @@
 	if (reg != data->sio_reg_enable)
 		superio_outb(sioreg, SIO_REG_ENABLE, data->sio_reg_enable);
 
-	if (data->kind == nct6791 || data->kind == nct6792)
+	if (data->kind == nct6791 || data->kind == nct6792 ||
+	    data->kind == nct6793)
 		nct6791_enable_io_mapping(sioreg);
 
 	superio_exit(sioreg);
@@ -4106,15 +4131,6 @@
 	.probe		= nct6775_probe,
 };
 
-static const char * const nct6775_sio_names[] __initconst = {
-	"NCT6106D",
-	"NCT6775F",
-	"NCT6776D/F",
-	"NCT6779D",
-	"NCT6791D",
-	"NCT6792D",
-};
-
 /* nct6775_find() looks for a '627 in the Super-I/O config space */
 static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
 {
@@ -4150,6 +4166,9 @@
 	case SIO_NCT6792_ID:
 		sio_data->kind = nct6792;
 		break;
+	case SIO_NCT6793_ID:
+		sio_data->kind = nct6793;
+		break;
 	default:
 		if (val != 0xffff)
 			pr_debug("unsupported chip ID: 0x%04x\n", val);
@@ -4175,7 +4194,8 @@
 		superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
 	}
 
-	if (sio_data->kind == nct6791 || sio_data->kind == nct6792)
+	if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
+	    sio_data->kind == nct6793)
 		nct6791_enable_io_mapping(sioaddr);
 
 	superio_exit(sioaddr);
@@ -4285,7 +4305,7 @@
 }
 
 MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
-MODULE_DESCRIPTION("NCT6106D/NCT6775F/NCT6776F/NCT6779D/NCT6791D/NCT6792D driver");
+MODULE_DESCRIPTION("Driver for NCT6775F and compatible chips");
 MODULE_LICENSE("GPL");
 
 module_init(sensors_nct6775_init);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index dc0b76c..feed306 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -477,7 +477,7 @@
 	return -EINVAL;
 }
 
-static int ntc_read_temp(void *dev, long *temp)
+static int ntc_read_temp(void *dev, int *temp)
 {
 	struct ntc_data *data = dev_get_drvdata(dev);
 	int ohm;
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 2d9a712..3e23003 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -323,6 +323,7 @@
 	{ .compatible = "pwm-fan", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
 
 static struct platform_driver pwm_fan_driver = {
 	.probe		= pwm_fan_probe,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 9da2735..6548262 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -98,7 +98,7 @@
 	return tmp102;
 }
 
-static int tmp102_read_temp(void *dev, long *temp)
+static int tmp102_read_temp(void *dev, int *temp)
 {
 	struct tmp102 *tmp102 = tmp102_update_device(dev);
 
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 3a3738f..cd4510a 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -620,7 +620,7 @@
 		.name = "C6-SKL",
 		.desc = "MWAIT 0x20",
 		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
-		.exit_latency = 75,
+		.exit_latency = 85,
 		.target_residency = 200,
 		.enter = &intel_idle,
 		.enter_freeze = intel_idle_freeze, },
@@ -636,11 +636,19 @@
 		.name = "C8-SKL",
 		.desc = "MWAIT 0x40",
 		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
-		.exit_latency = 174,
+		.exit_latency = 200,
 		.target_residency = 800,
 		.enter = &intel_idle,
 		.enter_freeze = intel_idle_freeze, },
 	{
+		.name = "C9-SKL",
+		.desc = "MWAIT 0x50",
+		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 480,
+		.target_residency = 5000,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
 		.name = "C10-SKL",
 		.desc = "MWAIT 0x60",
 		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index da4c697..aa26f3c 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -56,7 +56,6 @@
 
 source "drivers/infiniband/hw/mthca/Kconfig"
 source "drivers/infiniband/hw/qib/Kconfig"
-source "drivers/infiniband/hw/ehca/Kconfig"
 source "drivers/infiniband/hw/cxgb3/Kconfig"
 source "drivers/infiniband/hw/cxgb4/Kconfig"
 source "drivers/infiniband/hw/mlx4/Kconfig"
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 1bdb999..aded2a5 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -1,6 +1,5 @@
 obj-$(CONFIG_INFINIBAND_MTHCA)		+= mthca/
 obj-$(CONFIG_INFINIBAND_QIB)		+= qib/
-obj-$(CONFIG_INFINIBAND_EHCA)		+= ehca/
 obj-$(CONFIG_INFINIBAND_CXGB3)		+= cxgb3/
 obj-$(CONFIG_INFINIBAND_CXGB4)		+= cxgb4/
 obj-$(CONFIG_MLX4_INFINIBAND)		+= mlx4/
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 41d6911..f1ccd40 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -245,7 +245,6 @@
 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
 	if (MLX5_CAP_GEN(mdev, apm))
 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
-	props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
 	if (MLX5_CAP_GEN(mdev, xrc))
 		props->device_cap_flags |= IB_DEVICE_XRC;
 	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
@@ -795,53 +794,6 @@
 	return 0;
 }
 
-static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
-{
-	struct mlx5_create_mkey_mbox_in *in;
-	struct mlx5_mkey_seg *seg;
-	struct mlx5_core_mr mr;
-	int err;
-
-	in = kzalloc(sizeof(*in), GFP_KERNEL);
-	if (!in)
-		return -ENOMEM;
-
-	seg = &in->seg;
-	seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
-	seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
-	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-	seg->start_addr = 0;
-
-	err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
-				    NULL, NULL, NULL);
-	if (err) {
-		mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
-		goto err_in;
-	}
-
-	kfree(in);
-	*key = mr.key;
-
-	return 0;
-
-err_in:
-	kfree(in);
-
-	return err;
-}
-
-static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
-{
-	struct mlx5_core_mr mr;
-	int err;
-
-	memset(&mr, 0, sizeof(mr));
-	mr.key = key;
-	err = mlx5_core_destroy_mkey(dev->mdev, &mr);
-	if (err)
-		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
-}
-
 static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
 				      struct ib_ucontext *context,
 				      struct ib_udata *udata)
@@ -867,13 +819,6 @@
 			kfree(pd);
 			return ERR_PTR(-EFAULT);
 		}
-	} else {
-		err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
-		if (err) {
-			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
-			kfree(pd);
-			return ERR_PTR(err);
-		}
 	}
 
 	return &pd->ibpd;
@@ -884,9 +829,6 @@
 	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
 	struct mlx5_ib_pd *mpd = to_mpd(pd);
 
-	if (!pd->uobject)
-		free_pa_mkey(mdev, mpd->pa_lkey);
-
 	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
 	kfree(mpd);
 
@@ -1245,18 +1187,10 @@
 	struct ib_srq_init_attr attr;
 	struct mlx5_ib_dev *dev;
 	struct ib_cq_init_attr cq_attr = {.cqe = 1};
-	u32 rsvd_lkey;
 	int ret = 0;
 
 	dev = container_of(devr, struct mlx5_ib_dev, devr);
 
-	ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey);
-	if (ret) {
-		pr_err("Failed to query special context %d\n", ret);
-		return ret;
-	}
-	dev->ib_dev.local_dma_lkey = rsvd_lkey;
-
 	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
 	if (IS_ERR(devr->p0)) {
 		ret = PTR_ERR(devr->p0);
@@ -1418,6 +1352,7 @@
 	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
 	dev->ib_dev.owner		= THIS_MODULE;
 	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
+	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
 	dev->num_ports		= MLX5_CAP_GEN(mdev, num_ports);
 	dev->ib_dev.phys_port_cnt     = dev->num_ports;
 	dev->ib_dev.num_comp_vectors    =
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bb8cda7..22123b7 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -103,7 +103,6 @@
 struct mlx5_ib_pd {
 	struct ib_pd		ibpd;
 	u32			pdn;
-	u32			pa_lkey;
 };
 
 /* Use macros here so that don't have to duplicate
@@ -213,7 +212,6 @@
 	int			uuarn;
 
 	int			create_type;
-	u32			pa_lkey;
 
 	/* Store signature errors */
 	bool			signature_en;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index c745c6c..6f521a3 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -925,8 +925,6 @@
 			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
 			if (err)
 				mlx5_ib_dbg(dev, "err %d\n", err);
-			else
-				qp->pa_lkey = to_mpd(pd)->pa_lkey;
 		}
 
 		if (err)
@@ -2045,7 +2043,7 @@
 		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
 	dseg->addr = cpu_to_be64(mfrpl->map);
 	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
-	dseg->lkey = cpu_to_be32(pd->pa_lkey);
+	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
 }
 
 static __be32 send_ieth(struct ib_send_wr *wr)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ca28736..4cd5428 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -80,7 +80,7 @@
 	IPOIB_NUM_WC		  = 4,
 
 	IPOIB_MAX_PATH_REC_QUEUE  = 3,
-	IPOIB_MAX_MCAST_QUEUE	  = 3,
+	IPOIB_MAX_MCAST_QUEUE	  = 64,
 
 	IPOIB_FLAG_OPER_UP	  = 0,
 	IPOIB_FLAG_INITIALIZED	  = 1,
@@ -548,6 +548,8 @@
 
 int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
 		       union ib_gid *mgid, int set_qkey);
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast);
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid);
 
 int ipoib_init_qp(struct net_device *dev);
 int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 36536ce..f74316e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1149,6 +1149,9 @@
 	unsigned long dt;
 	unsigned long flags;
 	int i;
+	LIST_HEAD(remove_list);
+	struct ipoib_mcast *mcast, *tmcast;
+	struct net_device *dev = priv->dev;
 
 	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
 		return;
@@ -1176,6 +1179,19 @@
 							  lockdep_is_held(&priv->lock))) != NULL) {
 			/* was the neigh idle for two GC periods */
 			if (time_after(neigh_obsolete, neigh->alive)) {
+				u8 *mgid = neigh->daddr + 4;
+
+				/* Is this multicast? */
+				if (*mgid == 0xff) {
+					mcast = __ipoib_mcast_find(dev, mgid);
+
+					if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+						list_del(&mcast->list);
+						rb_erase(&mcast->rb_node, &priv->multicast_tree);
+						list_add_tail(&mcast->list, &remove_list);
+					}
+				}
+
 				rcu_assign_pointer(*np,
 						   rcu_dereference_protected(neigh->hnext,
 									     lockdep_is_held(&priv->lock)));
@@ -1191,6 +1207,8 @@
 
 out_unlock:
 	spin_unlock_irqrestore(&priv->lock, flags);
+	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
+		ipoib_mcast_leave(dev, mcast);
 }
 
 static void ipoib_reap_neigh(struct work_struct *work)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 09a1748..136cbef 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -153,7 +153,7 @@
 	return mcast;
 }
 
-static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct rb_node *n = priv->multicast_tree.rb_node;
@@ -508,17 +508,19 @@
 		rec.hop_limit	  = priv->broadcast->mcmember.hop_limit;
 
 		/*
-		 * Historically Linux IPoIB has never properly supported SEND
-		 * ONLY join. It emulated it by not providing all the required
-		 * attributes, which is enough to prevent group creation and
-		 * detect if there are full members or not. A major problem
-		 * with supporting SEND ONLY is detecting when the group is
-		 * auto-destroyed as IPoIB will cache the MLID..
+		 * Send-only IB Multicast joins do not work at the core
+		 * IB layer yet, so we can't use them here.  However,
+		 * we are emulating an Ethernet multicast send, which
+		 * does not require a multicast subscription and will
+		 * still send properly.  The most appropriate thing to
+		 * do is to create the group if it doesn't exist as that
+		 * most closely emulates the behavior, from a user space
+		 * application perspective, of Ethernet multicast
+		 * operation.  For now, we do a full join; later, when
+		 * the core IB layers support send-only joins, we will
+		 * use them.
 		 */
-#if 1
-		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
-			comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
-#else
+#if 0
 		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
 			rec.join_state = 4;
 #endif
@@ -675,7 +677,7 @@
 	return 0;
 }
 
-static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int ret = 0;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 1ace5d8..f58ff96 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -97,6 +97,11 @@
 module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024");
 
+bool iser_always_reg = true;
+module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
+MODULE_PARM_DESC(always_register,
+		 "Always register memory, even for contiguous memory regions (default:true)");
+
 bool iser_pi_enable = false;
 module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
 MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 86f6583..a5edd6e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -611,6 +611,7 @@
 extern bool iser_pi_enable;
 extern int iser_pi_guard;
 extern unsigned int iser_max_sectors;
+extern bool iser_always_reg;
 
 int iser_assign_reg_ops(struct iser_device *device);
 
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2493cc7..4c46d67 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -803,11 +803,12 @@
 iser_reg_prot_sg(struct iscsi_iser_task *task,
 		 struct iser_data_buf *mem,
 		 struct iser_fr_desc *desc,
+		 bool use_dma_key,
 		 struct iser_mem_reg *reg)
 {
 	struct iser_device *device = task->iser_conn->ib_conn.device;
 
-	if (mem->dma_nents == 1)
+	if (use_dma_key)
 		return iser_reg_dma(device, mem, reg);
 
 	return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
@@ -817,11 +818,12 @@
 iser_reg_data_sg(struct iscsi_iser_task *task,
 		 struct iser_data_buf *mem,
 		 struct iser_fr_desc *desc,
+		 bool use_dma_key,
 		 struct iser_mem_reg *reg)
 {
 	struct iser_device *device = task->iser_conn->ib_conn.device;
 
-	if (mem->dma_nents == 1)
+	if (use_dma_key)
 		return iser_reg_dma(device, mem, reg);
 
 	return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
@@ -836,14 +838,17 @@
 	struct iser_mem_reg *reg = &task->rdma_reg[dir];
 	struct iser_mem_reg *data_reg;
 	struct iser_fr_desc *desc = NULL;
+	bool use_dma_key;
 	int err;
 
 	err = iser_handle_unaligned_buf(task, mem, dir);
 	if (unlikely(err))
 		return err;
 
-	if (mem->dma_nents != 1 ||
-	    scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
+	use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
+		       scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);
+
+	if (!use_dma_key) {
 		desc = device->reg_ops->reg_desc_get(ib_conn);
 		reg->mem_h = desc;
 	}
@@ -853,7 +858,7 @@
 	else
 		data_reg = &task->desc.data_reg;
 
-	err = iser_reg_data_sg(task, mem, desc, data_reg);
+	err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
 	if (unlikely(err))
 		goto err_reg;
 
@@ -866,7 +871,8 @@
 			if (unlikely(err))
 				goto err_reg;
 
-			err = iser_reg_prot_sg(task, mem, desc, prot_reg);
+			err = iser_reg_prot_sg(task, mem, desc,
+					       use_dma_key, prot_reg);
 			if (unlikely(err))
 				goto err_reg;
 		}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index ae70cc1..85132d8 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -133,11 +133,15 @@
 			     (unsigned long)comp);
 	}
 
-	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
-				   IB_ACCESS_REMOTE_WRITE |
-				   IB_ACCESS_REMOTE_READ);
-	if (IS_ERR(device->mr))
-		goto dma_mr_err;
+	if (!iser_always_reg) {
+		int access = IB_ACCESS_LOCAL_WRITE |
+			     IB_ACCESS_REMOTE_WRITE |
+			     IB_ACCESS_REMOTE_READ;
+
+		device->mr = ib_get_dma_mr(device->pd, access);
+		if (IS_ERR(device->mr))
+			goto dma_mr_err;
+	}
 
 	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
 				iser_event_handler);
@@ -147,7 +151,8 @@
 	return 0;
 
 handler_err:
-	ib_dereg_mr(device->mr);
+	if (device->mr)
+		ib_dereg_mr(device->mr);
 dma_mr_err:
 	for (i = 0; i < device->comps_used; i++)
 		tasklet_kill(&device->comps[i].tasklet);
@@ -173,7 +178,6 @@
 static void iser_free_device_ib_res(struct iser_device *device)
 {
 	int i;
-	BUG_ON(device->mr == NULL);
 
 	for (i = 0; i < device->comps_used; i++) {
 		struct iser_comp *comp = &device->comps[i];
@@ -184,7 +188,8 @@
 	}
 
 	(void)ib_unregister_event_handler(&device->event_handler);
-	(void)ib_dereg_mr(device->mr);
+	if (device->mr)
+		(void)ib_dereg_mr(device->mr);
 	ib_dealloc_pd(device->pd);
 
 	kfree(device->comps);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index dc439a4..aa59037 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -238,8 +238,6 @@
 		rx_sg->lkey = device->pd->local_dma_lkey;
 	}
 
-	isert_conn->rx_desc_head = 0;
-
 	return 0;
 
 dma_map_fail:
@@ -634,7 +632,7 @@
 isert_init_conn(struct isert_conn *isert_conn)
 {
 	isert_conn->state = ISER_CONN_INIT;
-	INIT_LIST_HEAD(&isert_conn->accept_node);
+	INIT_LIST_HEAD(&isert_conn->node);
 	init_completion(&isert_conn->login_comp);
 	init_completion(&isert_conn->login_req_comp);
 	init_completion(&isert_conn->wait);
@@ -762,28 +760,15 @@
 	ret = isert_rdma_post_recvl(isert_conn);
 	if (ret)
 		goto out_conn_dev;
-	/*
-	 * Obtain the second reference now before isert_rdma_accept() to
-	 * ensure that any initiator generated REJECT CM event that occurs
-	 * asynchronously won't drop the last reference until the error path
-	 * in iscsi_target_login_sess_out() does it's ->iscsit_free_conn() ->
-	 * isert_free_conn() -> isert_put_conn() -> kref_put().
-	 */
-	if (!kref_get_unless_zero(&isert_conn->kref)) {
-		isert_warn("conn %p connect_release is running\n", isert_conn);
-		goto out_conn_dev;
-	}
 
 	ret = isert_rdma_accept(isert_conn);
 	if (ret)
 		goto out_conn_dev;
 
-	mutex_lock(&isert_np->np_accept_mutex);
-	list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list);
-	mutex_unlock(&isert_np->np_accept_mutex);
+	mutex_lock(&isert_np->mutex);
+	list_add_tail(&isert_conn->node, &isert_np->accepted);
+	mutex_unlock(&isert_np->mutex);
 
-	isert_info("np %p: Allow accept_np to continue\n", np);
-	up(&isert_np->np_sem);
 	return 0;
 
 out_conn_dev:
@@ -831,13 +816,21 @@
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
 	struct isert_conn *isert_conn = cma_id->qp->qp_context;
+	struct isert_np *isert_np = cma_id->context;
 
 	isert_info("conn %p\n", isert_conn);
 
 	mutex_lock(&isert_conn->mutex);
-	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
-		isert_conn->state = ISER_CONN_UP;
+	isert_conn->state = ISER_CONN_UP;
+	kref_get(&isert_conn->kref);
 	mutex_unlock(&isert_conn->mutex);
+
+	mutex_lock(&isert_np->mutex);
+	list_move_tail(&isert_conn->node, &isert_np->pending);
+	mutex_unlock(&isert_np->mutex);
+
+	isert_info("np %p: Allow accept_np to continue\n", isert_np);
+	up(&isert_np->sem);
 }
 
 static void
@@ -903,14 +896,14 @@
 
 	switch (event) {
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
-		isert_np->np_cm_id = NULL;
+		isert_np->cm_id = NULL;
 		break;
 	case RDMA_CM_EVENT_ADDR_CHANGE:
-		isert_np->np_cm_id = isert_setup_id(isert_np);
-		if (IS_ERR(isert_np->np_cm_id)) {
+		isert_np->cm_id = isert_setup_id(isert_np);
+		if (IS_ERR(isert_np->cm_id)) {
 			isert_err("isert np %p setup id failed: %ld\n",
-				  isert_np, PTR_ERR(isert_np->np_cm_id));
-			isert_np->np_cm_id = NULL;
+				  isert_np, PTR_ERR(isert_np->cm_id));
+			isert_np->cm_id = NULL;
 		}
 		break;
 	default:
@@ -929,7 +922,7 @@
 	struct isert_conn *isert_conn;
 	bool terminating = false;
 
-	if (isert_np->np_cm_id == cma_id)
+	if (isert_np->cm_id == cma_id)
 		return isert_np_cma_handler(cma_id->context, event);
 
 	isert_conn = cma_id->qp->qp_context;
@@ -945,13 +938,13 @@
 	if (terminating)
 		goto out;
 
-	mutex_lock(&isert_np->np_accept_mutex);
-	if (!list_empty(&isert_conn->accept_node)) {
-		list_del_init(&isert_conn->accept_node);
+	mutex_lock(&isert_np->mutex);
+	if (!list_empty(&isert_conn->node)) {
+		list_del_init(&isert_conn->node);
 		isert_put_conn(isert_conn);
 		queue_work(isert_release_wq, &isert_conn->release_work);
 	}
-	mutex_unlock(&isert_np->np_accept_mutex);
+	mutex_unlock(&isert_np->mutex);
 
 out:
 	return 0;
@@ -962,6 +955,7 @@
 {
 	struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
+	list_del_init(&isert_conn->node);
 	isert_conn->cm_id = NULL;
 	isert_put_conn(isert_conn);
 
@@ -1006,35 +1000,51 @@
 }
 
 static int
-isert_post_recv(struct isert_conn *isert_conn, u32 count)
+isert_post_recvm(struct isert_conn *isert_conn, u32 count)
 {
 	struct ib_recv_wr *rx_wr, *rx_wr_failed;
 	int i, ret;
-	unsigned int rx_head = isert_conn->rx_desc_head;
 	struct iser_rx_desc *rx_desc;
 
 	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
-		rx_desc		= &isert_conn->rx_descs[rx_head];
-		rx_wr->wr_id	= (uintptr_t)rx_desc;
-		rx_wr->sg_list	= &rx_desc->rx_sg;
-		rx_wr->num_sge	= 1;
-		rx_wr->next	= rx_wr + 1;
-		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
+		rx_desc = &isert_conn->rx_descs[i];
+		rx_wr->wr_id = (uintptr_t)rx_desc;
+		rx_wr->sg_list = &rx_desc->rx_sg;
+		rx_wr->num_sge = 1;
+		rx_wr->next = rx_wr + 1;
 	}
-
 	rx_wr--;
 	rx_wr->next = NULL; /* mark end of work requests list */
 
 	isert_conn->post_recv_buf_count += count;
 	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
-				&rx_wr_failed);
+			   &rx_wr_failed);
 	if (ret) {
 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
 		isert_conn->post_recv_buf_count -= count;
-	} else {
-		isert_dbg("Posted %d RX buffers\n", count);
-		isert_conn->rx_desc_head = rx_head;
 	}
+
+	return ret;
+}
+
+static int
+isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
+{
+	struct ib_recv_wr *rx_wr_failed, rx_wr;
+	int ret;
+
+	rx_wr.wr_id = (uintptr_t)rx_desc;
+	rx_wr.sg_list = &rx_desc->rx_sg;
+	rx_wr.num_sge = 1;
+	rx_wr.next = NULL;
+
+	isert_conn->post_recv_buf_count++;
+	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
+	if (ret) {
+		isert_err("ib_post_recv() failed with ret: %d\n", ret);
+		isert_conn->post_recv_buf_count--;
+	}
+
 	return ret;
 }
 
@@ -1205,7 +1215,8 @@
 			if (ret)
 				return ret;
 
-			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
+			ret = isert_post_recvm(isert_conn,
+					       ISERT_QP_MAX_RECV_DTOS);
 			if (ret)
 				return ret;
 
@@ -1278,7 +1289,7 @@
 }
 
 static struct iscsi_cmd
-*isert_allocate_cmd(struct iscsi_conn *conn)
+*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
 {
 	struct isert_conn *isert_conn = conn->context;
 	struct isert_cmd *isert_cmd;
@@ -1292,6 +1303,7 @@
 	isert_cmd = iscsit_priv_cmd(cmd);
 	isert_cmd->conn = isert_conn;
 	isert_cmd->iscsi_cmd = cmd;
+	isert_cmd->rx_desc = rx_desc;
 
 	return cmd;
 }
@@ -1303,9 +1315,9 @@
 {
 	struct iscsi_conn *conn = isert_conn->conn;
 	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
-	struct scatterlist *sg;
 	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
 	bool dump_payload = false;
+	unsigned int data_len;
 
 	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
 	if (rc < 0)
@@ -1314,7 +1326,10 @@
 	imm_data = cmd->immediate_data;
 	imm_data_len = cmd->first_burst_len;
 	unsol_data = cmd->unsolicited_data;
+	data_len = cmd->se_cmd.data_length;
 
+	if (imm_data && imm_data_len == data_len)
+		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
 	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
 	if (rc < 0) {
 		return 0;
@@ -1326,13 +1341,20 @@
 	if (!imm_data)
 		return 0;
 
-	sg = &cmd->se_cmd.t_data_sg[0];
-	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
-
-	isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
-		  sg, sg_nents, &rx_desc->data[0], imm_data_len);
-
-	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
+	if (imm_data_len != data_len) {
+		sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
+		sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
+				    &rx_desc->data[0], imm_data_len);
+		isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
+			  sg_nents, imm_data_len);
+	} else {
+		sg_init_table(&isert_cmd->sg, 1);
+		cmd->se_cmd.t_data_sg = &isert_cmd->sg;
+		cmd->se_cmd.t_data_nents = 1;
+		sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
+		isert_dbg("Transfer Immediate imm_data_len: %d\n",
+			  imm_data_len);
+	}
 
 	cmd->write_data_done += imm_data_len;
 
@@ -1407,6 +1429,15 @@
 	if (rc < 0)
 		return rc;
 
+	/*
+	 * Multiple data-outs on the same command can arrive,
+	 * so post the buffer beforehand.
+	 */
+	rc = isert_post_recv(isert_conn, rx_desc);
+	if (rc) {
+		isert_err("ib_post_recv failed with %d\n", rc);
+		return rc;
+	}
 	return 0;
 }
 
@@ -1479,7 +1510,7 @@
 
 	switch (opcode) {
 	case ISCSI_OP_SCSI_CMD:
-		cmd = isert_allocate_cmd(conn);
+		cmd = isert_allocate_cmd(conn, rx_desc);
 		if (!cmd)
 			break;
 
@@ -1493,7 +1524,7 @@
 					rx_desc, (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_NOOP_OUT:
-		cmd = isert_allocate_cmd(conn);
+		cmd = isert_allocate_cmd(conn, rx_desc);
 		if (!cmd)
 			break;
 
@@ -1506,7 +1537,7 @@
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		cmd = isert_allocate_cmd(conn);
+		cmd = isert_allocate_cmd(conn, rx_desc);
 		if (!cmd)
 			break;
 
@@ -1514,22 +1545,20 @@
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_LOGOUT:
-		cmd = isert_allocate_cmd(conn);
+		cmd = isert_allocate_cmd(conn, rx_desc);
 		if (!cmd)
 			break;
 
 		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_TEXT:
-		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
+		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
 			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
-			if (!cmd)
-				break;
-		} else {
-			cmd = isert_allocate_cmd(conn);
-			if (!cmd)
-				break;
-		}
+		else
+			cmd = isert_allocate_cmd(conn, rx_desc);
+
+		if (!cmd)
+			break;
 
 		isert_cmd = iscsit_priv_cmd(cmd);
 		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
@@ -1589,7 +1618,7 @@
 	struct ib_device *ib_dev = isert_conn->cm_id->device;
 	struct iscsi_hdr *hdr;
 	u64 rx_dma;
-	int rx_buflen, outstanding;
+	int rx_buflen;
 
 	if ((char *)desc == isert_conn->login_req_buf) {
 		rx_dma = isert_conn->login_req_dma;
@@ -1629,22 +1658,6 @@
 				      DMA_FROM_DEVICE);
 
 	isert_conn->post_recv_buf_count--;
-	isert_dbg("Decremented post_recv_buf_count: %d\n",
-		  isert_conn->post_recv_buf_count);
-
-	if ((char *)desc == isert_conn->login_req_buf)
-		return;
-
-	outstanding = isert_conn->post_recv_buf_count;
-	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
-		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
-				ISERT_MIN_POSTED_RX);
-		err = isert_post_recv(isert_conn, count);
-		if (err) {
-			isert_err("isert_post_recv() count: %d failed, %d\n",
-			       count, err);
-		}
-	}
 }
 
 static int
@@ -2156,6 +2169,12 @@
 	struct ib_send_wr *wr_failed;
 	int ret;
 
+	ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
+	if (ret) {
+		isert_err("ib_post_recv failed with %d\n", ret);
+		return ret;
+	}
+
 	ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
 			   &wr_failed);
 	if (ret) {
@@ -2950,6 +2969,12 @@
 				   &isert_cmd->tx_desc.send_wr);
 		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
 		wr->send_wr_num += 1;
+
+		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
+		if (rc) {
+			isert_err("ib_post_recv failed with %d\n", rc);
+			return rc;
+		}
 	}
 
 	rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
@@ -2999,9 +3024,16 @@
 static int
 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 {
-	int ret;
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+	int ret = 0;
 
 	switch (state) {
+	case ISTATE_REMOVE:
+		spin_lock_bh(&conn->cmd_lock);
+		list_del_init(&cmd->i_conn_node);
+		spin_unlock_bh(&conn->cmd_lock);
+		isert_put_cmd(isert_cmd, true);
+		break;
 	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
 		ret = isert_put_nopin(cmd, conn, false);
 		break;
@@ -3095,7 +3127,7 @@
 
 static int
 isert_setup_np(struct iscsi_np *np,
-	       struct __kernel_sockaddr_storage *ksockaddr)
+	       struct sockaddr_storage *ksockaddr)
 {
 	struct isert_np *isert_np;
 	struct rdma_cm_id *isert_lid;
@@ -3106,10 +3138,10 @@
 		isert_err("Unable to allocate struct isert_np\n");
 		return -ENOMEM;
 	}
-	sema_init(&isert_np->np_sem, 0);
-	mutex_init(&isert_np->np_accept_mutex);
-	INIT_LIST_HEAD(&isert_np->np_accept_list);
-	init_completion(&isert_np->np_login_comp);
+	sema_init(&isert_np->sem, 0);
+	mutex_init(&isert_np->mutex);
+	INIT_LIST_HEAD(&isert_np->accepted);
+	INIT_LIST_HEAD(&isert_np->pending);
 	isert_np->np = np;
 
 	/*
@@ -3117,7 +3149,7 @@
 	 * in iscsi_target_configfs.c code..
 	 */
 	memcpy(&np->np_sockaddr, ksockaddr,
-	       sizeof(struct __kernel_sockaddr_storage));
+	       sizeof(struct sockaddr_storage));
 
 	isert_lid = isert_setup_id(isert_np);
 	if (IS_ERR(isert_lid)) {
@@ -3125,7 +3157,7 @@
 		goto out;
 	}
 
-	isert_np->np_cm_id = isert_lid;
+	isert_np->cm_id = isert_lid;
 	np->np_context = isert_np;
 
 	return 0;
@@ -3199,32 +3231,11 @@
 {
 	struct rdma_cm_id *cm_id = isert_conn->cm_id;
 	struct rdma_route *cm_route = &cm_id->route;
-	struct sockaddr_in *sock_in;
-	struct sockaddr_in6 *sock_in6;
 
 	conn->login_family = np->np_sockaddr.ss_family;
 
-	if (np->np_sockaddr.ss_family == AF_INET6) {
-		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
-		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
-			 &sock_in6->sin6_addr.in6_u);
-		conn->login_port = ntohs(sock_in6->sin6_port);
-
-		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
-		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
-			 &sock_in6->sin6_addr.in6_u);
-		conn->local_port = ntohs(sock_in6->sin6_port);
-	} else {
-		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
-		sprintf(conn->login_ip, "%pI4",
-			&sock_in->sin_addr.s_addr);
-		conn->login_port = ntohs(sock_in->sin_port);
-
-		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
-		sprintf(conn->local_ip, "%pI4",
-			&sock_in->sin_addr.s_addr);
-		conn->local_port = ntohs(sock_in->sin_port);
-	}
+	conn->login_sockaddr = cm_route->addr.dst_addr;
+	conn->local_sockaddr = cm_route->addr.src_addr;
 }
 
 static int
@@ -3235,7 +3246,7 @@
 	int ret;
 
 accept_wait:
-	ret = down_interruptible(&isert_np->np_sem);
+	ret = down_interruptible(&isert_np->sem);
 	if (ret)
 		return -ENODEV;
 
@@ -3252,15 +3263,15 @@
 	}
 	spin_unlock_bh(&np->np_thread_lock);
 
-	mutex_lock(&isert_np->np_accept_mutex);
-	if (list_empty(&isert_np->np_accept_list)) {
-		mutex_unlock(&isert_np->np_accept_mutex);
+	mutex_lock(&isert_np->mutex);
+	if (list_empty(&isert_np->pending)) {
+		mutex_unlock(&isert_np->mutex);
 		goto accept_wait;
 	}
-	isert_conn = list_first_entry(&isert_np->np_accept_list,
-			struct isert_conn, accept_node);
-	list_del_init(&isert_conn->accept_node);
-	mutex_unlock(&isert_np->np_accept_mutex);
+	isert_conn = list_first_entry(&isert_np->pending,
+			struct isert_conn, node);
+	list_del_init(&isert_conn->node);
+	mutex_unlock(&isert_np->mutex);
 
 	conn->context = isert_conn;
 	isert_conn->conn = conn;
@@ -3278,28 +3289,39 @@
 	struct isert_np *isert_np = np->np_context;
 	struct isert_conn *isert_conn, *n;
 
-	if (isert_np->np_cm_id)
-		rdma_destroy_id(isert_np->np_cm_id);
+	if (isert_np->cm_id)
+		rdma_destroy_id(isert_np->cm_id);
 
 	/*
 	 * FIXME: At this point we don't have a good way to ensure
 	 * that at this point we don't have hanging connections that
 	 * completed RDMA establishment but didn't start iscsi login
 	 * process. So work around this by cleaning up whatever piled
-	 * up in np_accept_list.
+	 * up in accepted and pending lists.
 	 */
-	mutex_lock(&isert_np->np_accept_mutex);
-	if (!list_empty(&isert_np->np_accept_list)) {
-		isert_info("Still have isert connections, cleaning up...\n");
+	mutex_lock(&isert_np->mutex);
+	if (!list_empty(&isert_np->pending)) {
+		isert_info("Still have isert pending connections\n");
 		list_for_each_entry_safe(isert_conn, n,
-					 &isert_np->np_accept_list,
-					 accept_node) {
+					 &isert_np->pending,
+					 node) {
 			isert_info("cleaning isert_conn %p state (%d)\n",
 				   isert_conn, isert_conn->state);
 			isert_connect_release(isert_conn);
 		}
 	}
-	mutex_unlock(&isert_np->np_accept_mutex);
+
+	if (!list_empty(&isert_np->accepted)) {
+		isert_info("Still have isert accepted connections\n");
+		list_for_each_entry_safe(isert_conn, n,
+					 &isert_np->accepted,
+					 node) {
+			isert_info("cleaning isert_conn %p state (%d)\n",
+				   isert_conn, isert_conn->state);
+			isert_connect_release(isert_conn);
+		}
+	}
+	mutex_unlock(&isert_np->mutex);
 
 	np->np_context = NULL;
 	kfree(isert_np);
@@ -3366,6 +3388,41 @@
 	wait_for_completion(&isert_conn->wait_comp_err);
 }
 
+/**
+ * isert_put_unsol_pending_cmds() - Drop commands waiting for
+ *     unsolicited data-out
+ * @conn:    iscsi connection
+ *
+ * We might still have commands waiting for unsolicited data-out
+ * messages. We must put the extra reference on those before
+ * blocking on target_wait_for_session_cmds.
+ */
+static void
+isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
+{
+	struct iscsi_cmd *cmd, *tmp;
+	static LIST_HEAD(drop_cmd_list);
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
+		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
+		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
+		    (cmd->write_data_done < cmd->se_cmd.data_length))
+			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
+	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
+		list_del_init(&cmd->i_conn_node);
+		if (cmd->i_state != ISTATE_REMOVE) {
+			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+
+			isert_info("conn %p dropping cmd %p\n", conn, cmd);
+			isert_put_cmd(isert_cmd, true);
+		}
+	}
+}
+
 static void isert_wait_conn(struct iscsi_conn *conn)
 {
 	struct isert_conn *isert_conn = conn->context;
@@ -3384,8 +3441,9 @@
 	isert_conn_terminate(isert_conn);
 	mutex_unlock(&isert_conn->mutex);
 
-	isert_wait4cmds(conn);
 	isert_wait4flush(isert_conn);
+	isert_put_unsol_pending_cmds(conn);
+	isert_wait4cmds(conn);
 	isert_wait4logout(isert_conn);
 
 	queue_work(isert_release_wq, &isert_conn->release_work);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 6a04ba3..c5b99bc 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -113,7 +113,6 @@
 };
 
 struct isert_rdma_wr {
-	struct list_head	wr_list;
 	struct isert_cmd	*isert_cmd;
 	enum iser_ib_op_code	iser_ib_op;
 	struct ib_sge		*ib_sge;
@@ -134,14 +133,13 @@
 	uint64_t		write_va;
 	u64			pdu_buf_dma;
 	u32			pdu_buf_len;
-	u32			read_va_off;
-	u32			write_va_off;
-	u32			rdma_wr_num;
 	struct isert_conn	*conn;
 	struct iscsi_cmd	*iscsi_cmd;
 	struct iser_tx_desc	tx_desc;
+	struct iser_rx_desc	*rx_desc;
 	struct isert_rdma_wr	rdma_wr;
 	struct work_struct	comp_work;
+	struct scatterlist	sg;
 };
 
 struct isert_device;
@@ -159,11 +157,10 @@
 	u64			login_req_dma;
 	int			login_req_len;
 	u64			login_rsp_dma;
-	unsigned int		rx_desc_head;
 	struct iser_rx_desc	*rx_descs;
-	struct ib_recv_wr	rx_wr[ISERT_MIN_POSTED_RX];
+	struct ib_recv_wr	rx_wr[ISERT_QP_MAX_RECV_DTOS];
 	struct iscsi_conn	*conn;
-	struct list_head	accept_node;
+	struct list_head	node;
 	struct completion	login_comp;
 	struct completion	login_req_comp;
 	struct iser_tx_desc	login_tx_desc;
@@ -222,9 +219,9 @@
 
 struct isert_np {
 	struct iscsi_np         *np;
-	struct semaphore	np_sem;
-	struct rdma_cm_id	*np_cm_id;
-	struct mutex		np_accept_mutex;
-	struct list_head	np_accept_list;
-	struct completion	np_login_comp;
+	struct semaphore	sem;
+	struct rdma_cm_id	*cm_id;
+	struct mutex		mutex;
+	struct list_head	accepted;
+	struct list_head	pending;
 };
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 9d35499..08d4964 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -290,19 +290,14 @@
 {
 	struct evdev_client *client = file->private_data;
 	struct evdev *evdev = client->evdev;
-	int retval;
 
-	retval = mutex_lock_interruptible(&evdev->mutex);
-	if (retval)
-		return retval;
+	mutex_lock(&evdev->mutex);
 
-	if (!evdev->exist || client->revoked)
-		retval = -ENODEV;
-	else
-		retval = input_flush_device(&evdev->handle, file);
+	if (evdev->exist && !client->revoked)
+		input_flush_device(&evdev->handle, file);
 
 	mutex_unlock(&evdev->mutex);
-	return retval;
+	return 0;
 }
 
 static void evdev_free(struct device *dev)
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 56eb471..4215b53 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -196,6 +196,7 @@
 config JOYSTICK_ZHENHUA
 	tristate "5-byte Zhenhua RC transmitter"
 	select SERIO
+	select BITREVERSE
 	help
 	  Say Y here if you have a Zhen Hua PPM-4CH transmitter which is
 	  supplied with a ready to fly micro electric indoor helicopters
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index d2ea863..2165f3d 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -5,8 +5,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * <<Power management needs to be implemented>>.
  */
 
 #include <linux/clk.h>
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 1f7e15c..4f5ef5b 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -118,6 +118,7 @@
 	{ .compatible = "stericsson,ab8500-ponkey", },
 	{}
 };
+MODULE_DEVICE_TABLE(of, ab8500_ponkey_match);
 #endif
 
 static struct platform_driver ab8500_ponkey_driver = {
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index e82edf8..f2261ab 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -173,6 +173,7 @@
 	{ .compatible = "pwm-beeper", },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, pwm_beeper_match);
 #endif
 
 static struct platform_driver pwm_beeper_driver = {
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index 6bf3f10..a804705 100644
--- a/drivers/input/misc/regulator-haptic.c
+++ b/drivers/input/misc/regulator-haptic.c
@@ -249,6 +249,7 @@
 	{ .compatible = "regulator-haptic" },
 	{ /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, regulator_haptic_dt_match);
 
 static struct platform_driver regulator_haptic_driver = {
 	.probe		= regulator_haptic_probe,
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 54116e5..6f997aa 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -253,6 +253,7 @@
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, bbc_beep_match);
 
 static struct platform_driver bbc_beep_driver = {
 	.driver = {
@@ -332,6 +333,7 @@
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, grover_beep_match);
 
 static struct platform_driver grover_beep_driver = {
 	.driver = {
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index e2b7420..fa94530 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1170,6 +1170,7 @@
 	{ "ELAN0000", 0 },
 	{ "ELAN0100", 0 },
 	{ "ELAN0600", 0 },
+	{ "ELAN1000", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, elan_acpi_id);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index c9c98f0a..db91de5 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -877,7 +877,7 @@
 static int i8042_controller_check(void)
 {
 	if (i8042_flush()) {
-		pr_err("No controller found\n");
+		pr_info("No controller found\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 059edeb..600dccef 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -479,6 +479,18 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called mtouch.
 
+config TOUCHSCREEN_IMX6UL_TSC
+	tristate "Freescale i.MX6UL touchscreen controller"
+	depends on (OF && GPIOLIB) || COMPILE_TEST
+	help
+	  Say Y here if you have a Freescale i.MX6UL, and want to
+	  use the internal touchscreen controller.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called imx6ul_tsc.
+
 config TOUCHSCREEN_INEXIO
 	tristate "iNexio serial touchscreens"
 	select SERIO
@@ -1040,4 +1052,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called zforce_ts.
 
+config TOUCHSCREEN_COLIBRI_VF50
+	tristate "Toradex Colibri on board touchscreen driver"
+	depends on GPIOLIB && IIO && VF610_ADC
+	help
+	  Say Y here if you have a Colibri VF50 and plan to use
+	  the on-board 4-wire resistive touchscreen.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called colibri_vf50_ts.
+
 endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index c85aae2..1b79cc0 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -38,6 +38,7 @@
 obj-$(CONFIG_TOUCHSCREEN_FUJITSU)	+= fujitsu_ts.o
 obj-$(CONFIG_TOUCHSCREEN_GOODIX)	+= goodix.o
 obj-$(CONFIG_TOUCHSCREEN_ILI210X)	+= ili210x.o
+obj-$(CONFIG_TOUCHSCREEN_IMX6UL_TSC)	+= imx6ul_tsc.o
 obj-$(CONFIG_TOUCHSCREEN_INEXIO)	+= inexio.o
 obj-$(CONFIG_TOUCHSCREEN_INTEL_MID)	+= intel-mid-touch.o
 obj-$(CONFIG_TOUCHSCREEN_IPROC)		+= bcm_iproc_tsc.o
@@ -85,3 +86,4 @@
 obj-$(CONFIG_TOUCHSCREEN_SX8654)	+= sx8654.o
 obj-$(CONFIG_TOUCHSCREEN_TPS6507X)	+= tps6507x-ts.o
 obj-$(CONFIG_TOUCHSCREEN_ZFORCE)	+= zforce_ts.o
+obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50)	+= colibri-vf50-ts.o
diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c
new file mode 100644
index 0000000..5d4903a
--- /dev/null
+++ b/drivers/input/touchscreen/colibri-vf50-ts.c
@@ -0,0 +1,386 @@
+/*
+ * Toradex Colibri VF50 Touchscreen driver
+ *
+ * Copyright 2015 Toradex AG
+ *
+ * Originally authored by Stefan Agner for 3.0 kernel
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/types.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define DRIVER_NAME			"colibri-vf50-ts"
+#define DRV_VERSION			"1.0"
+
+#define VF_ADC_MAX			((1 << 12) - 1)
+
+#define COLI_TOUCH_MIN_DELAY_US		1000
+#define COLI_TOUCH_MAX_DELAY_US		2000
+#define COLI_PULLUP_MIN_DELAY_US	10000
+#define COLI_PULLUP_MAX_DELAY_US	11000
+#define COLI_TOUCH_NO_OF_AVGS		5
+#define COLI_TOUCH_REQ_ADC_CHAN		4
+
+struct vf50_touch_device {
+	struct platform_device *pdev;
+	struct input_dev *ts_input;
+	struct iio_channel *channels;
+	struct gpio_desc *gpio_xp;
+	struct gpio_desc *gpio_xm;
+	struct gpio_desc *gpio_yp;
+	struct gpio_desc *gpio_ym;
+	int pen_irq;
+	int min_pressure;
+	bool stop_touchscreen;
+};
+
+/*
+ * Enables the given plates and measures the touch parameters using the ADC
+ */
+static int adc_ts_measure(struct iio_channel *channel,
+			  struct gpio_desc *plate_p, struct gpio_desc *plate_m)
+{
+	int i, value = 0, val = 0;
+	int error;
+
+	gpiod_set_value(plate_p, 1);
+	gpiod_set_value(plate_m, 1);
+
+	usleep_range(COLI_TOUCH_MIN_DELAY_US, COLI_TOUCH_MAX_DELAY_US);
+
+	for (i = 0; i < COLI_TOUCH_NO_OF_AVGS; i++) {
+		error = iio_read_channel_raw(channel, &val);
+		if (error < 0) {
+			value = error;
+			goto error_iio_read;
+		}
+
+		value += val;
+	}
+
+	value /= COLI_TOUCH_NO_OF_AVGS;
+
+error_iio_read:
+	gpiod_set_value(plate_p, 0);
+	gpiod_set_value(plate_m, 0);
+
+	return value;
+}
+
+/*
+ * Enable touch detection using falling edge detection on XM
+ */
+static void vf50_ts_enable_touch_detection(struct vf50_touch_device *vf50_ts)
+{
+	/* Enable plate YM (needs to be strong GND, high active) */
+	gpiod_set_value(vf50_ts->gpio_ym, 1);
+
+	/*
+	 * Let the platform mux switch to the idle state in order
+	 * to enable the pull-up on the GPIO
+	 */
+	pinctrl_pm_select_idle_state(&vf50_ts->pdev->dev);
+
+	/* Wait for the pull-up to be stable on high */
+	usleep_range(COLI_PULLUP_MIN_DELAY_US, COLI_PULLUP_MAX_DELAY_US);
+}
+
+/*
+ * ADC touch screen sampling bottom half irq handler
+ */
+static irqreturn_t vf50_ts_irq_bh(int irq, void *private)
+{
+	struct vf50_touch_device *vf50_ts = private;
+	struct device *dev = &vf50_ts->pdev->dev;
+	int val_x, val_y, val_z1, val_z2, val_p = 0;
+	bool discard_val_on_start = true;
+
+	/* Disable the touch detection plates */
+	gpiod_set_value(vf50_ts->gpio_ym, 0);
+
+	/* Switch the platform mux back to the default state to mux as ADC */
+	pinctrl_pm_select_default_state(dev);
+
+	while (!vf50_ts->stop_touchscreen) {
+		/* X-Direction */
+		val_x = adc_ts_measure(&vf50_ts->channels[0],
+				vf50_ts->gpio_xp, vf50_ts->gpio_xm);
+		if (val_x < 0)
+			break;
+
+		/* Y-Direction */
+		val_y = adc_ts_measure(&vf50_ts->channels[1],
+				vf50_ts->gpio_yp, vf50_ts->gpio_ym);
+		if (val_y < 0)
+			break;
+
+		/*
+		 * Touch pressure
+		 * Measure on XP/YM
+		 */
+		val_z1 = adc_ts_measure(&vf50_ts->channels[2],
+				vf50_ts->gpio_yp, vf50_ts->gpio_xm);
+		if (val_z1 < 0)
+			break;
+		val_z2 = adc_ts_measure(&vf50_ts->channels[3],
+				vf50_ts->gpio_yp, vf50_ts->gpio_xm);
+		if (val_z2 < 0)
+			break;
+
+		/* Validate signal (avoid calculation using noise) */
+		if (val_z1 > 64 && val_x > 64) {
+			/*
+			 * Calculate resistance between the plates
+			 * lower resistance means higher pressure
+			 */
+			int r_x = (1000 * val_x) / VF_ADC_MAX;
+
+			val_p = (r_x * val_z2) / val_z1 - r_x;
+
+		} else {
+			val_p = 2000;
+		}
+
+		val_p = 2000 - val_p;
+		dev_dbg(dev,
+			"Measured values: x: %d, y: %d, z1: %d, z2: %d, p: %d\n",
+			val_x, val_y, val_z1, val_z2, val_p);
+
+		/*
+		 * If touch pressure is too low, stop measuring and reenable
+		 * touch detection
+		 */
+		if (val_p < vf50_ts->min_pressure || val_p > 2000)
+			break;
+
+		/*
+		 * The pressure may not be enough for the first x and
+		 * second y measurement, but is usually fine by the time
+		 * the driver does the third and fourth measurement. To
+		 * take care of this, always drop the first measurement.
+		 */
+		if (discard_val_on_start) {
+			discard_val_on_start = false;
+		} else {
+			/*
+			 * Report touch position and sleep for
+			 * the next measurement.
+			 */
+			input_report_abs(vf50_ts->ts_input,
+					ABS_X, VF_ADC_MAX - val_x);
+			input_report_abs(vf50_ts->ts_input,
+					ABS_Y, VF_ADC_MAX - val_y);
+			input_report_abs(vf50_ts->ts_input,
+					ABS_PRESSURE, val_p);
+			input_report_key(vf50_ts->ts_input, BTN_TOUCH, 1);
+			input_sync(vf50_ts->ts_input);
+		}
+
+		usleep_range(COLI_PULLUP_MIN_DELAY_US,
+			     COLI_PULLUP_MAX_DELAY_US);
+	}
+
+	/* Report no more touch, re-enable touch detection */
+	input_report_abs(vf50_ts->ts_input, ABS_PRESSURE, 0);
+	input_report_key(vf50_ts->ts_input, BTN_TOUCH, 0);
+	input_sync(vf50_ts->ts_input);
+
+	vf50_ts_enable_touch_detection(vf50_ts);
+
+	return IRQ_HANDLED;
+}
+
+static int vf50_ts_open(struct input_dev *dev_input)
+{
+	struct vf50_touch_device *touchdev = input_get_drvdata(dev_input);
+	struct device *dev = &touchdev->pdev->dev;
+
+	dev_dbg(dev, "Input device %s opened, starting touch detection\n",
+		dev_input->name);
+
+	touchdev->stop_touchscreen = false;
+
+	/* Mux detection before requesting IRQ, wait for pull-up to settle */
+	vf50_ts_enable_touch_detection(touchdev);
+
+	return 0;
+}
+
+static void vf50_ts_close(struct input_dev *dev_input)
+{
+	struct vf50_touch_device *touchdev = input_get_drvdata(dev_input);
+	struct device *dev = &touchdev->pdev->dev;
+
+	touchdev->stop_touchscreen = true;
+
+	/* Make sure IRQ is not running past close */
+	mb();
+	synchronize_irq(touchdev->pen_irq);
+
+	gpiod_set_value(touchdev->gpio_ym, 0);
+	pinctrl_pm_select_default_state(dev);
+
+	dev_dbg(dev, "Input device %s closed, disable touch detection\n",
+		dev_input->name);
+}
+
+static int vf50_ts_get_gpiod(struct device *dev, struct gpio_desc **gpio_d,
+			     const char *con_id, enum gpiod_flags flags)
+{
+	int error;
+
+	*gpio_d = devm_gpiod_get(dev, con_id, flags);
+	if (IS_ERR(*gpio_d)) {
+		error = PTR_ERR(*gpio_d);
+		dev_err(dev, "Could not get gpio_%s %d\n", con_id, error);
+		return error;
+	}
+
+	return 0;
+}
+
+static void vf50_ts_channel_release(void *data)
+{
+	struct iio_channel *channels = data;
+
+	iio_channel_release_all(channels);
+}
+
+static int vf50_ts_probe(struct platform_device *pdev)
+{
+	struct input_dev *input;
+	struct iio_channel *channels;
+	struct device *dev = &pdev->dev;
+	struct vf50_touch_device *touchdev;
+	int num_adc_channels;
+	int error;
+
+	channels = iio_channel_get_all(dev);
+	if (IS_ERR(channels))
+		return PTR_ERR(channels);
+
+	error = devm_add_action(dev, vf50_ts_channel_release, channels);
+	if (error) {
+		iio_channel_release_all(channels);
+		dev_err(dev, "Failed to register iio channel release action");
+		return error;
+	}
+
+	num_adc_channels = 0;
+	while (channels[num_adc_channels].indio_dev)
+		num_adc_channels++;
+
+	if (num_adc_channels != COLI_TOUCH_REQ_ADC_CHAN) {
+		dev_err(dev, "Inadequate ADC channels specified\n");
+		return -EINVAL;
+	}
+
+	touchdev = devm_kzalloc(dev, sizeof(*touchdev), GFP_KERNEL);
+	if (!touchdev)
+		return -ENOMEM;
+
+	touchdev->pdev = pdev;
+	touchdev->channels = channels;
+
+	error = of_property_read_u32(dev->of_node, "vf50-ts-min-pressure",
+				 &touchdev->min_pressure);
+	if (error)
+		return error;
+
+	input = devm_input_allocate_device(dev);
+	if (!input) {
+		dev_err(dev, "Failed to allocate TS input device\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, touchdev);
+
+	input->name = DRIVER_NAME;
+	input->id.bustype = BUS_HOST;
+	input->dev.parent = dev;
+	input->open = vf50_ts_open;
+	input->close = vf50_ts_close;
+
+	input_set_capability(input, EV_KEY, BTN_TOUCH);
+	input_set_abs_params(input, ABS_X, 0, VF_ADC_MAX, 0, 0);
+	input_set_abs_params(input, ABS_Y, 0, VF_ADC_MAX, 0, 0);
+	input_set_abs_params(input, ABS_PRESSURE, 0, VF_ADC_MAX, 0, 0);
+
+	touchdev->ts_input = input;
+	input_set_drvdata(input, touchdev);
+
+	error = input_register_device(input);
+	if (error) {
+		dev_err(dev, "Failed to register input device\n");
+		return error;
+	}
+
+	error = vf50_ts_get_gpiod(dev, &touchdev->gpio_xp, "xp", GPIOD_OUT_LOW);
+	if (error)
+		return error;
+
+	error = vf50_ts_get_gpiod(dev, &touchdev->gpio_xm,
+				"xm", GPIOD_OUT_LOW);
+	if (error)
+		return error;
+
+	error = vf50_ts_get_gpiod(dev, &touchdev->gpio_yp, "yp", GPIOD_OUT_LOW);
+	if (error)
+		return error;
+
+	error = vf50_ts_get_gpiod(dev, &touchdev->gpio_ym, "ym", GPIOD_OUT_LOW);
+	if (error)
+		return error;
+
+	touchdev->pen_irq = platform_get_irq(pdev, 0);
+	if (touchdev->pen_irq < 0)
+		return touchdev->pen_irq;
+
+	error = devm_request_threaded_irq(dev, touchdev->pen_irq,
+					  NULL, vf50_ts_irq_bh, IRQF_ONESHOT,
+					  "vf50 touch", touchdev);
+	if (error) {
+		dev_err(dev, "Failed to request IRQ %d: %d\n",
+			touchdev->pen_irq, error);
+		return error;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id vf50_touch_of_match[] = {
+	{ .compatible = "toradex,vf50-touchscreen", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, vf50_touch_of_match);
+
+static struct platform_driver vf50_touch_driver = {
+	.driver = {
+		.name = "toradex,vf50_touchctrl",
+		.of_match_table = vf50_touch_of_match,
+	},
+	.probe = vf50_ts_probe,
+};
+module_platform_driver(vf50_touch_driver);
+
+MODULE_AUTHOR("Sanchayan Maity");
+MODULE_DESCRIPTION("Colibri VF50 Touchscreen driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
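
The pressure path in vf50_ts_irq_bh() above is the usual 4-wire resistive
calculation R_touch = R_xplate * (X / ADC_MAX) * (Z2 / Z1 - 1), with the
X-plate resistance hard-coded as 1000 and the result inverted so that a
firmer touch reports a larger value. A minimal standalone sketch of that
computation (the helper name is illustrative and not part of the driver;
it assumes val_z1 > 0, which the driver's validity check guarantees):

	/* Illustrative restatement of the driver's pressure math. */
	static int vf50_pressure_sketch(int val_x, int val_z1, int val_z2)
	{
		/* X position scaled by the assumed 1000 ohm X-plate resistance */
		int r_x = (1000 * val_x) / VF_ADC_MAX;
		/* Estimated touch resistance from the two Z measurements */
		int r_touch = (r_x * val_z2) / val_z1 - r_x;

		/* Lower resistance (firmer touch) yields a higher reported pressure */
		return 2000 - r_touch;
	}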
diff --git a/drivers/input/touchscreen/cyttsp4_i2c.c b/drivers/input/touchscreen/cyttsp4_i2c.c
index 9a323dd..a9f95c7 100644
--- a/drivers/input/touchscreen/cyttsp4_i2c.c
+++ b/drivers/input/touchscreen/cyttsp4_i2c.c
@@ -86,4 +86,3 @@
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) I2C driver");
 MODULE_AUTHOR("Cypress");
-MODULE_ALIAS("i2c:cyttsp4");
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index 519e2de..eee51b3 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -86,4 +86,3 @@
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) I2C driver");
 MODULE_AUTHOR("Cypress");
-MODULE_ALIAS("i2c:cyttsp");
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index ddac134..17cc20e 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -102,7 +102,7 @@
 #define ELAN_FW_PAGESIZE	132
 
 /* calibration timeout definition */
-#define ELAN_CALI_TIMEOUT_MSEC	10000
+#define ELAN_CALI_TIMEOUT_MSEC	12000
 
 #define ELAN_POWERON_DELAY_USEC	500
 #define ELAN_RESET_DELAY_MSEC	20
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
new file mode 100644
index 0000000..ff0b758
--- /dev/null
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -0,0 +1,523 @@
+/*
+ * Freescale i.MX6UL touchscreen controller driver
+ *
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio/consumer.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+/* ADC configuration registers field define */
+#define ADC_AIEN		(0x1 << 7)
+#define ADC_CONV_DISABLE	0x1F
+#define ADC_CAL			(0x1 << 7)
+#define ADC_CALF		0x2
+#define ADC_12BIT_MODE		(0x2 << 2)
+#define ADC_IPG_CLK		0x00
+#define ADC_CLK_DIV_8		(0x03 << 5)
+#define ADC_SHORT_SAMPLE_MODE	(0x0 << 4)
+#define ADC_HARDWARE_TRIGGER	(0x1 << 13)
+#define SELECT_CHANNEL_4	0x04
+#define SELECT_CHANNEL_1	0x01
+#define DISABLE_CONVERSION_INT	(0x0 << 7)
+
+/* ADC registers */
+#define REG_ADC_HC0		0x00
+#define REG_ADC_HC1		0x04
+#define REG_ADC_HC2		0x08
+#define REG_ADC_HC3		0x0C
+#define REG_ADC_HC4		0x10
+#define REG_ADC_HS		0x14
+#define REG_ADC_R0		0x18
+#define REG_ADC_CFG		0x2C
+#define REG_ADC_GC		0x30
+#define REG_ADC_GS		0x34
+
+#define ADC_TIMEOUT		msecs_to_jiffies(100)
+
+/* TSC registers */
+#define REG_TSC_BASIC_SETING	0x00
+#define REG_TSC_PRE_CHARGE_TIME	0x10
+#define REG_TSC_FLOW_CONTROL	0x20
+#define REG_TSC_MEASURE_VALUE	0x30
+#define REG_TSC_INT_EN		0x40
+#define REG_TSC_INT_SIG_EN	0x50
+#define REG_TSC_INT_STATUS	0x60
+#define REG_TSC_DEBUG_MODE	0x70
+#define REG_TSC_DEBUG_MODE2	0x80
+
+/* TSC configuration registers field define */
+#define DETECT_4_WIRE_MODE	(0x0 << 4)
+#define AUTO_MEASURE		0x1
+#define MEASURE_SIGNAL		0x1
+#define DETECT_SIGNAL		(0x1 << 4)
+#define VALID_SIGNAL		(0x1 << 8)
+#define MEASURE_INT_EN		0x1
+#define MEASURE_SIG_EN		0x1
+#define VALID_SIG_EN		(0x1 << 8)
+#define DE_GLITCH_2		(0x2 << 29)
+#define START_SENSE		(0x1 << 12)
+#define TSC_DISABLE		(0x1 << 16)
+#define DETECT_MODE		0x2
+
+struct imx6ul_tsc {
+	struct device *dev;
+	struct input_dev *input;
+	void __iomem *tsc_regs;
+	void __iomem *adc_regs;
+	struct clk *tsc_clk;
+	struct clk *adc_clk;
+	struct gpio_desc *xnur_gpio;
+
+	int measure_delay_time;
+	int pre_charge_time;
+
+	struct completion completion;
+};
+
+/*
+ * The TSC module needs the ADC to get the measured value, so
+ * before configuring the TSC we should initialize the ADC module.
+ */
+static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
+{
+	int adc_hc = 0;
+	int adc_gc;
+	int adc_gs;
+	int adc_cfg;
+	int timeout;
+
+	reinit_completion(&tsc->completion);
+
+	adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
+	adc_cfg |= ADC_12BIT_MODE | ADC_IPG_CLK;
+	adc_cfg |= ADC_CLK_DIV_8 | ADC_SHORT_SAMPLE_MODE;
+	adc_cfg &= ~ADC_HARDWARE_TRIGGER;
+	writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
+
+	/* enable calibration interrupt */
+	adc_hc |= ADC_AIEN;
+	adc_hc |= ADC_CONV_DISABLE;
+	writel(adc_hc, tsc->adc_regs + REG_ADC_HC0);
+
+	/* start ADC calibration */
+	adc_gc = readl(tsc->adc_regs + REG_ADC_GC);
+	adc_gc |= ADC_CAL;
+	writel(adc_gc, tsc->adc_regs + REG_ADC_GC);
+
+	timeout = wait_for_completion_timeout
+			(&tsc->completion, ADC_TIMEOUT);
+	if (timeout == 0)
+		dev_err(tsc->dev, "Timeout for adc calibration\n");
+
+	adc_gs = readl(tsc->adc_regs + REG_ADC_GS);
+	if (adc_gs & ADC_CALF)
+		dev_err(tsc->dev, "ADC calibration failed\n");
+
+	/* The TSC needs the ADC to work in hardware-trigger mode */
+	adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
+	adc_cfg |= ADC_HARDWARE_TRIGGER;
+	writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
+}
+
+/*
+ * This is a TSC workaround. Currently the TSC misconnects two
+ * ADC channels, so this function remaps the channel configuration
+ * for hardware trigger.
+ */
+static void imx6ul_tsc_channel_config(struct imx6ul_tsc *tsc)
+{
+	int adc_hc0, adc_hc1, adc_hc2, adc_hc3, adc_hc4;
+
+	adc_hc0 = DISABLE_CONVERSION_INT;
+	writel(adc_hc0, tsc->adc_regs + REG_ADC_HC0);
+
+	adc_hc1 = DISABLE_CONVERSION_INT | SELECT_CHANNEL_4;
+	writel(adc_hc1, tsc->adc_regs + REG_ADC_HC1);
+
+	adc_hc2 = DISABLE_CONVERSION_INT;
+	writel(adc_hc2, tsc->adc_regs + REG_ADC_HC2);
+
+	adc_hc3 = DISABLE_CONVERSION_INT | SELECT_CHANNEL_1;
+	writel(adc_hc3, tsc->adc_regs + REG_ADC_HC3);
+
+	adc_hc4 = DISABLE_CONVERSION_INT;
+	writel(adc_hc4, tsc->adc_regs + REG_ADC_HC4);
+}
+
+/*
+ * TSC setup: configure the pre-charge time and measure delay time.
+ * Different touchscreens may need different pre-charge and measure
+ * delay times.
+ */
+static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
+{
+	int basic_setting = 0;
+	int start;
+
+	basic_setting |= tsc->measure_delay_time << 8;
+	basic_setting |= DETECT_4_WIRE_MODE | AUTO_MEASURE;
+	writel(basic_setting, tsc->tsc_regs + REG_TSC_BASIC_SETING);
+
+	writel(DE_GLITCH_2, tsc->tsc_regs + REG_TSC_DEBUG_MODE2);
+
+	writel(tsc->pre_charge_time, tsc->tsc_regs + REG_TSC_PRE_CHARGE_TIME);
+	writel(MEASURE_INT_EN, tsc->tsc_regs + REG_TSC_INT_EN);
+	writel(MEASURE_SIG_EN | VALID_SIG_EN,
+		tsc->tsc_regs + REG_TSC_INT_SIG_EN);
+
+	/* start sense detection */
+	start = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
+	start |= START_SENSE;
+	start &= ~TSC_DISABLE;
+	writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
+}
+
+static void imx6ul_tsc_init(struct imx6ul_tsc *tsc)
+{
+	imx6ul_adc_init(tsc);
+	imx6ul_tsc_channel_config(tsc);
+	imx6ul_tsc_set(tsc);
+}
+
+static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
+{
+	int tsc_flow;
+	int adc_cfg;
+
+	/* The TSC controller enters idle state */
+	tsc_flow = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
+	tsc_flow |= TSC_DISABLE;
+	writel(tsc_flow, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
+
+	/* The ADC controller enters stop mode */
+	adc_cfg = readl(tsc->adc_regs + REG_ADC_HC0);
+	adc_cfg |= ADC_CONV_DISABLE;
+	writel(adc_cfg, tsc->adc_regs + REG_ADC_HC0);
+}
+
+/* Delay some time (max 2 ms), waiting for the pre-charge to complete. */
+static bool tsc_wait_detect_mode(struct imx6ul_tsc *tsc)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(2);
+	int state_machine;
+	int debug_mode2;
+
+	do {
+		if (time_after(jiffies, timeout))
+			return false;
+
+		usleep_range(200, 400);
+		debug_mode2 = readl(tsc->tsc_regs + REG_TSC_DEBUG_MODE2);
+		state_machine = (debug_mode2 >> 20) & 0x7;
+	} while (state_machine != DETECT_MODE);
+
+	usleep_range(200, 400);
+	return true;
+}
+
+static irqreturn_t tsc_irq_fn(int irq, void *dev_id)
+{
+	struct imx6ul_tsc *tsc = dev_id;
+	int status;
+	int value;
+	int x, y;
+	int start;
+
+	status = readl(tsc->tsc_regs + REG_TSC_INT_STATUS);
+
+	/* write 1 to clear the measure-signal bit */
+	writel(MEASURE_SIGNAL | DETECT_SIGNAL,
+		tsc->tsc_regs + REG_TSC_INT_STATUS);
+
+	/* It's a HW self-clearing bit. Set this bit and start sense detection */
+	start = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
+	start |= START_SENSE;
+	writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
+
+	if (status & MEASURE_SIGNAL) {
+		value = readl(tsc->tsc_regs + REG_TSC_MEASURE_VALUE);
+		x = (value >> 16) & 0x0fff;
+		y = value & 0x0fff;
+
+		/*
+		 * In detect mode, we can get the xnur gpio value;
+		 * otherwise assume the contact is still active.
+		 */
+		if (!tsc_wait_detect_mode(tsc) ||
+		    gpiod_get_value_cansleep(tsc->xnur_gpio)) {
+			input_report_key(tsc->input, BTN_TOUCH, 1);
+			input_report_abs(tsc->input, ABS_X, x);
+			input_report_abs(tsc->input, ABS_Y, y);
+		} else {
+			input_report_key(tsc->input, BTN_TOUCH, 0);
+		}
+
+		input_sync(tsc->input);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t adc_irq_fn(int irq, void *dev_id)
+{
+	struct imx6ul_tsc *tsc = dev_id;
+	int coco;
+	int value;
+
+	coco = readl(tsc->adc_regs + REG_ADC_HS);
+	if (coco & 0x01) {
+		value = readl(tsc->adc_regs + REG_ADC_R0);
+		complete(&tsc->completion);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int imx6ul_tsc_open(struct input_dev *input_dev)
+{
+	struct imx6ul_tsc *tsc = input_get_drvdata(input_dev);
+	int err;
+
+	err = clk_prepare_enable(tsc->adc_clk);
+	if (err) {
+		dev_err(tsc->dev,
+			"Could not prepare or enable the adc clock: %d\n",
+			err);
+		return err;
+	}
+
+	err = clk_prepare_enable(tsc->tsc_clk);
+	if (err) {
+		dev_err(tsc->dev,
+			"Could not prepare or enable the tsc clock: %d\n",
+			err);
+		clk_disable_unprepare(tsc->adc_clk);
+		return err;
+	}
+
+	imx6ul_tsc_init(tsc);
+
+	return 0;
+}
+
+static void imx6ul_tsc_close(struct input_dev *input_dev)
+{
+	struct imx6ul_tsc *tsc = input_get_drvdata(input_dev);
+
+	imx6ul_tsc_disable(tsc);
+
+	clk_disable_unprepare(tsc->tsc_clk);
+	clk_disable_unprepare(tsc->adc_clk);
+}
+
+static int imx6ul_tsc_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct imx6ul_tsc *tsc;
+	struct input_dev *input_dev;
+	struct resource *tsc_mem;
+	struct resource *adc_mem;
+	int err;
+	int tsc_irq;
+	int adc_irq;
+
+	tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL);
+	if (!tsc)
+		return -ENOMEM;
+
+	input_dev = devm_input_allocate_device(&pdev->dev);
+	if (!input_dev)
+		return -ENOMEM;
+
+	input_dev->name = "iMX6UL TouchScreen Controller";
+	input_dev->id.bustype = BUS_HOST;
+
+	input_dev->open = imx6ul_tsc_open;
+	input_dev->close = imx6ul_tsc_close;
+
+	input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
+	input_set_abs_params(input_dev, ABS_X, 0, 0xFFF, 0, 0);
+	input_set_abs_params(input_dev, ABS_Y, 0, 0xFFF, 0, 0);
+
+	input_set_drvdata(input_dev, tsc);
+
+	tsc->dev = &pdev->dev;
+	tsc->input = input_dev;
+	init_completion(&tsc->completion);
+
+	tsc->xnur_gpio = devm_gpiod_get(&pdev->dev, "xnur", GPIOD_IN);
+	if (IS_ERR(tsc->xnur_gpio)) {
+		err = PTR_ERR(tsc->xnur_gpio);
+		dev_err(&pdev->dev,
+			"failed to request GPIO tsc_X- (xnur): %d\n", err);
+		return err;
+	}
+
+	tsc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	tsc->tsc_regs = devm_ioremap_resource(&pdev->dev, tsc_mem);
+	if (IS_ERR(tsc->tsc_regs)) {
+		err = PTR_ERR(tsc->tsc_regs);
+		dev_err(&pdev->dev, "failed to remap tsc memory: %d\n", err);
+		return err;
+	}
+
+	adc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	tsc->adc_regs = devm_ioremap_resource(&pdev->dev, adc_mem);
+	if (IS_ERR(tsc->adc_regs)) {
+		err = PTR_ERR(tsc->adc_regs);
+		dev_err(&pdev->dev, "failed to remap adc memory: %d\n", err);
+		return err;
+	}
+
+	tsc->tsc_clk = devm_clk_get(&pdev->dev, "tsc");
+	if (IS_ERR(tsc->tsc_clk)) {
+		err = PTR_ERR(tsc->tsc_clk);
+		dev_err(&pdev->dev, "failed getting tsc clock: %d\n", err);
+		return err;
+	}
+
+	tsc->adc_clk = devm_clk_get(&pdev->dev, "adc");
+	if (IS_ERR(tsc->adc_clk)) {
+		err = PTR_ERR(tsc->adc_clk);
+		dev_err(&pdev->dev, "failed getting adc clock: %d\n", err);
+		return err;
+	}
+
+	tsc_irq = platform_get_irq(pdev, 0);
+	if (tsc_irq < 0) {
+		dev_err(&pdev->dev, "no tsc irq resource?\n");
+		return tsc_irq;
+	}
+
+	adc_irq = platform_get_irq(pdev, 1);
+	if (adc_irq <= 0) {
+		dev_err(&pdev->dev, "no adc irq resource?\n");
+		return adc_irq;
+	}
+
+	err = devm_request_threaded_irq(tsc->dev, tsc_irq,
+					NULL, tsc_irq_fn, IRQF_ONESHOT,
+					dev_name(&pdev->dev), tsc);
+	if (err) {
+		dev_err(&pdev->dev,
+			"failed requesting tsc irq %d: %d\n",
+			tsc_irq, err);
+		return err;
+	}
+
+	err = devm_request_irq(tsc->dev, adc_irq, adc_irq_fn, 0,
+				dev_name(&pdev->dev), tsc);
+	if (err) {
+		dev_err(&pdev->dev,
+			"failed requesting adc irq %d: %d\n",
+			adc_irq, err);
+		return err;
+	}
+
+	err = of_property_read_u32(np, "measure-delay-time",
+				   &tsc->measure_delay_time);
+	if (err)
+		tsc->measure_delay_time = 0xffff;
+
+	err = of_property_read_u32(np, "pre-charge-time",
+				   &tsc->pre_charge_time);
+	if (err)
+		tsc->pre_charge_time = 0xfff;
+
+	err = input_register_device(tsc->input);
+	if (err) {
+		dev_err(&pdev->dev,
+			"failed to register input device: %d\n", err);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, tsc);
+	return 0;
+}
+
+static int __maybe_unused imx6ul_tsc_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct imx6ul_tsc *tsc = platform_get_drvdata(pdev);
+	struct input_dev *input_dev = tsc->input;
+
+	mutex_lock(&input_dev->mutex);
+
+	if (input_dev->users) {
+		imx6ul_tsc_disable(tsc);
+
+		clk_disable_unprepare(tsc->tsc_clk);
+		clk_disable_unprepare(tsc->adc_clk);
+	}
+
+	mutex_unlock(&input_dev->mutex);
+
+	return 0;
+}
+
+static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct imx6ul_tsc *tsc = platform_get_drvdata(pdev);
+	struct input_dev *input_dev = tsc->input;
+	int retval = 0;
+
+	mutex_lock(&input_dev->mutex);
+
+	if (input_dev->users) {
+		retval = clk_prepare_enable(tsc->adc_clk);
+		if (retval)
+			goto out;
+
+		retval = clk_prepare_enable(tsc->tsc_clk);
+		if (retval) {
+			clk_disable_unprepare(tsc->adc_clk);
+			goto out;
+		}
+
+		imx6ul_tsc_init(tsc);
+	}
+
+out:
+	mutex_unlock(&input_dev->mutex);
+	return retval;
+}
+
+static SIMPLE_DEV_PM_OPS(imx6ul_tsc_pm_ops,
+			 imx6ul_tsc_suspend, imx6ul_tsc_resume);
+
+static const struct of_device_id imx6ul_tsc_match[] = {
+	{ .compatible = "fsl,imx6ul-tsc", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx6ul_tsc_match);
+
+static struct platform_driver imx6ul_tsc_driver = {
+	.driver		= {
+		.name	= "imx6ul-tsc",
+		.of_match_table	= imx6ul_tsc_match,
+		.pm	= &imx6ul_tsc_pm_ops,
+	},
+	.probe		= imx6ul_tsc_probe,
+};
+module_platform_driver(imx6ul_tsc_driver);
+
+MODULE_AUTHOR("Haibo Chen <haibo.chen@freescale.com>");
+MODULE_DESCRIPTION("Freescale i.MX6UL Touchscreen controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index c011699..4857943 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -191,7 +191,7 @@
 	writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
 }
 
-static int sun4i_get_temp(const struct sun4i_ts_data *ts, long *temp)
+static int sun4i_get_temp(const struct sun4i_ts_data *ts, int *temp)
 {
 	/* No temp_data until the first irq */
 	if (ts->temp_data == -1)
@@ -202,7 +202,7 @@
 	return 0;
 }
 
-static int sun4i_get_tz_temp(void *data, long *temp)
+static int sun4i_get_tz_temp(void *data, int *temp)
 {
 	return sun4i_get_temp(data, temp);
 }
@@ -215,14 +215,14 @@
 			 char *buf)
 {
 	struct sun4i_ts_data *ts = dev_get_drvdata(dev);
-	long temp;
+	int temp;
 	int error;
 
 	error = sun4i_get_temp(ts, &temp);
 	if (error)
 		return error;
 
-	return sprintf(buf, "%ld\n", temp);
+	return sprintf(buf, "%d\n", temp);
 }
 
 static ssize_t show_temp_label(struct device *dev,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 4664c2a..d9da766 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -43,7 +43,7 @@
 endmenu
 
 config IOMMU_IOVA
-	bool
+	tristate
 
 config OF_IOMMU
        def_bool y
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2d7349a..041bc18 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3215,6 +3215,8 @@
 
 	/* Restrict dma_mask to the width that the iommu can handle */
 	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+	/* Ensure we reserve the whole size-aligned region */
+	nrpages = __roundup_pow_of_two(nrpages);
 
 	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
 		/*
@@ -3711,7 +3713,7 @@
 static int __init iommu_init_mempool(void)
 {
 	int ret;
-	ret = iommu_iova_cache_init();
+	ret = iova_cache_get();
 	if (ret)
 		return ret;
 
@@ -3725,7 +3727,7 @@
 
 	kmem_cache_destroy(iommu_domain_cache);
 domain_error:
-	iommu_iova_cache_destroy();
+	iova_cache_put();
 
 	return -ENOMEM;
 }
@@ -3734,7 +3736,7 @@
 {
 	kmem_cache_destroy(iommu_devinfo_cache);
 	kmem_cache_destroy(iommu_domain_cache);
-	iommu_iova_cache_destroy();
+	iova_cache_put();
 }
 
 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
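
With the iova.c change below, alloc_iova() no longer rounds a size-aligned
request up to a power of two itself, so a caller that wants the whole
size-aligned region reserved now rounds the page count first, which is what
the intel-iommu.c hunk above does. A minimal sketch of such a caller after
this series (the function and variable names are illustrative, not taken
from intel-iommu.c):

	#include <linux/iova.h>

	/* Illustrative caller: reserve a size-aligned IOVA range. */
	static unsigned long example_alloc_aligned(struct iova_domain *iovad,
						   unsigned long nrpages,
						   u64 dma_mask)
	{
		struct iova *iova;

		/* Round up so the whole size-aligned region is reserved */
		nrpages = __roundup_pow_of_two(nrpages);
		iova = alloc_iova(iovad, nrpages, dma_mask >> PAGE_SHIFT, true);
		if (!iova)
			return 0;

		return iova->pfn_lo;	/* start pfn, aligned to the rounded size */
	}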
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b7c3d92..fa0adef 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -18,42 +18,9 @@
  */
 
 #include <linux/iova.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 
-static struct kmem_cache *iommu_iova_cache;
-
-int iommu_iova_cache_init(void)
-{
-	int ret = 0;
-
-	iommu_iova_cache = kmem_cache_create("iommu_iova",
-					 sizeof(struct iova),
-					 0,
-					 SLAB_HWCACHE_ALIGN,
-					 NULL);
-	if (!iommu_iova_cache) {
-		pr_err("Couldn't create iova cache\n");
-		ret = -ENOMEM;
-	}
-
-	return ret;
-}
-
-void iommu_iova_cache_destroy(void)
-{
-	kmem_cache_destroy(iommu_iova_cache);
-}
-
-struct iova *alloc_iova_mem(void)
-{
-	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
-}
-
-void free_iova_mem(struct iova *iova)
-{
-	kmem_cache_free(iommu_iova_cache, iova);
-}
-
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	unsigned long start_pfn, unsigned long pfn_32bit)
@@ -72,6 +39,7 @@
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = pfn_32bit;
 }
+EXPORT_SYMBOL_GPL(init_iova_domain);
 
 static struct rb_node *
 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
@@ -120,19 +88,14 @@
 	}
 }
 
-/* Computes the padding size required, to make the
- * the start address naturally aligned on its size
+/*
+ * Computes the padding size required to make the start address
+ * naturally aligned on the power-of-two order of its size
  */
-static int
-iova_get_pad_size(int size, unsigned int limit_pfn)
+static unsigned int
+iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
 {
-	unsigned int pad_size = 0;
-	unsigned int order = ilog2(size);
-
-	if (order)
-		pad_size = (limit_pfn + 1) % (1 << order);
-
-	return pad_size;
+	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
 }
 
 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
@@ -242,6 +205,57 @@
 	rb_insert_color(&iova->node, root);
 }
 
+static struct kmem_cache *iova_cache;
+static unsigned int iova_cache_users;
+static DEFINE_MUTEX(iova_cache_mutex);
+
+struct iova *alloc_iova_mem(void)
+{
+	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(alloc_iova_mem);
+
+void free_iova_mem(struct iova *iova)
+{
+	kmem_cache_free(iova_cache, iova);
+}
+EXPORT_SYMBOL(free_iova_mem);
+
+int iova_cache_get(void)
+{
+	mutex_lock(&iova_cache_mutex);
+	if (!iova_cache_users) {
+		iova_cache = kmem_cache_create(
+			"iommu_iova", sizeof(struct iova), 0,
+			SLAB_HWCACHE_ALIGN, NULL);
+		if (!iova_cache) {
+			mutex_unlock(&iova_cache_mutex);
+			printk(KERN_ERR "Couldn't create iova cache\n");
+			return -ENOMEM;
+		}
+	}
+
+	iova_cache_users++;
+	mutex_unlock(&iova_cache_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iova_cache_get);
+
+void iova_cache_put(void)
+{
+	mutex_lock(&iova_cache_mutex);
+	if (WARN_ON(!iova_cache_users)) {
+		mutex_unlock(&iova_cache_mutex);
+		return;
+	}
+	iova_cache_users--;
+	if (!iova_cache_users)
+		kmem_cache_destroy(iova_cache);
+	mutex_unlock(&iova_cache_mutex);
+}
+EXPORT_SYMBOL_GPL(iova_cache_put);
+
 /**
  * alloc_iova - allocates an iova
  * @iovad: - iova domain in question
@@ -265,12 +279,6 @@
 	if (!new_iova)
 		return NULL;
 
-	/* If size aligned is set then round the size to
-	 * to next power of two.
-	 */
-	if (size_aligned)
-		size = __roundup_pow_of_two(size);
-
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 			new_iova, size_aligned);
 
@@ -281,6 +289,7 @@
 
 	return new_iova;
 }
+EXPORT_SYMBOL_GPL(alloc_iova);
 
 /**
  * find_iova - finds an iova for a given pfn
@@ -321,6 +330,7 @@
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(find_iova);
 
 /**
  * __free_iova - frees the given iova
@@ -339,6 +349,7 @@
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	free_iova_mem(iova);
 }
+EXPORT_SYMBOL_GPL(__free_iova);
 
 /**
  * free_iova - finds and frees the iova for a given pfn
@@ -356,6 +367,7 @@
 		__free_iova(iovad, iova);
 
 }
+EXPORT_SYMBOL_GPL(free_iova);
 
 /**
  * put_iova_domain - destroys the iova domain
@@ -378,6 +390,7 @@
 	}
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 }
+EXPORT_SYMBOL_GPL(put_iova_domain);
 
 static int
 __is_range_overlap(struct rb_node *node,
@@ -467,6 +480,7 @@
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return iova;
 }
+EXPORT_SYMBOL_GPL(reserve_iova);
 
 /**
  * copy_reserved_iova - copies the reserved between domains
@@ -493,6 +507,7 @@
 	}
 	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
+EXPORT_SYMBOL_GPL(copy_reserved_iova);
 
 struct iova *
 split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
@@ -534,3 +549,6 @@
 		free_iova_mem(prev);
 	return NULL;
 }
+
+MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
+MODULE_LICENSE("GPL");
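With alloc_iova_mem()/free_iova_mem() now backed by a reference-counted kmem_cache and the domain API exported, the iova allocator can be shared by modular users as well as the built-in DMA-remapping code. A minimal sketch of how a module might pair the new get/put calls; the domain geometry below is purely illustrative:

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/sizes.h>

static struct iova_domain demo_iovad;	/* hypothetical consumer */

static int __init demo_init(void)
{
	int ret;

	ret = iova_cache_get();		/* take a reference on the shared cache */
	if (ret)
		return ret;

	/* 4 KiB granule; the start and 32-bit-limit pfns are illustrative. */
	init_iova_domain(&demo_iovad, SZ_4K, 1, 0x000fffff);
	return 0;
}

static void __exit demo_exit(void)
{
	put_iova_domain(&demo_iovad);	/* releases every iova still in the tree */
	iova_cache_put();		/* drop the cache reference */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");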
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 0717aa9..9bc20e2 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -135,8 +135,9 @@
 static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
 			     struct seq_file *s)
 {
-	return seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram,
+	seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram,
 			  (cr->cam & MMU_CAM_P) ? 1 : 0);
+	return 0;
 }
 
 static size_t omap_dump_tlb_entries(struct omap_iommu *obj, struct seq_file *s)
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index e9c6f2a..cd7d3bc 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -65,12 +65,10 @@
 	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
 }
 
-static void combiner_handle_cascade_irq(unsigned int __irq,
-					struct irq_desc *desc)
+static void combiner_handle_cascade_irq(struct irq_desc *desc)
 {
 	struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
-	unsigned int irq = irq_desc_get_irq(desc);
 	unsigned int cascade_irq, combiner_irq;
 	unsigned long status;
 
@@ -88,7 +86,7 @@
 	cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);
 
 	if (unlikely(!cascade_irq))
-		handle_bad_irq(irq, desc);
+		handle_bad_irq(desc);
 	else
 		generic_handle_irq(cascade_irq);
 
@@ -165,7 +163,7 @@
 
 	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
 	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
-	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	irq_set_probe(irq);
 
 	return 0;
 }
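This is the first of many conversions in this series to the new chained-handler prototype: the flow handler no longer receives the Linux irq number, only the descriptor, and handle_bad_irq() follows suit. A handler that still needs the number recovers it with irq_desc_get_irq(). A generic sketch with hypothetical names (demo_domain and demo_read_pending stand in for the driver's domain and status read):

static struct irq_domain *demo_domain;		/* hypothetical */
static u32 demo_read_pending(void);		/* hypothetical status read */

static void demo_chained_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq;

	chained_irq_enter(chip, desc);

	cascade_irq = irq_find_mapping(demo_domain, demo_read_pending());
	if (unlikely(!cascade_irq))
		handle_bad_irq(desc);	/* note: descriptor only now */
	else
		generic_handle_irq(cascade_irq);

	/* If the Linux irq number is still needed, use irq_desc_get_irq(desc). */

	chained_irq_exit(chip, desc);
}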
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 39b72da..9d89900 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -56,9 +56,6 @@
 
 #define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)
 
-#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5)
-#define ARMADA_370_XP_FABRIC_IRQ		(3)
-
 #define IPI_DOORBELL_START                      (0)
 #define IPI_DOORBELL_END                        (8)
 #define IPI_DOORBELL_MASK                       0xFF
@@ -81,13 +78,10 @@
 
 static inline bool is_percpu_irq(irq_hw_number_t irq)
 {
-	switch (irq) {
-	case ARMADA_370_XP_TIMER0_PER_CPU_IRQ:
-	case ARMADA_370_XP_FABRIC_IRQ:
+	if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
 		return true;
-	default:
-		return false;
-	}
+
+	return false;
 }
 
 /*
@@ -200,7 +194,6 @@
 {
 	irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
 				 handle_simple_irq);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
@@ -317,7 +310,7 @@
 		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
 					handle_level_irq);
 	}
-	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+	irq_set_probe(virq);
 
 	return 0;
 }
@@ -447,8 +440,7 @@
 static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
 #endif
 
-static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
-						  struct irq_desc *desc)
+static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned long irqmap, irqn, irqsrc, cpuid;
@@ -551,7 +543,7 @@
 		if (virq == 0)
 			continue;
 
-		if (irq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+		if (!is_percpu_irq(irq))
 			writel(irq, per_cpu_int_base +
 			       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
 		else
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 9da9942..f6d6804 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -88,28 +88,36 @@
 {
 	struct irq_domain *domain = d->domain;
 	struct irq_domain_chip_generic *dgc = domain->gc;
-	struct irq_chip_generic *gc = dgc->gc[0];
+	struct irq_chip_generic *bgc = dgc->gc[0];
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 
-	/* Disable interrupt on AIC5 */
-	irq_gc_lock(gc);
+	/*
+	 * Disable interrupt on AIC5. We always take the lock of the
+	 * first irq chip as all chips share the same registers.
+	 */
+	irq_gc_lock(bgc);
 	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
 	irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
 	gc->mask_cache &= ~d->mask;
-	irq_gc_unlock(gc);
+	irq_gc_unlock(bgc);
 }
 
 static void aic5_unmask(struct irq_data *d)
 {
 	struct irq_domain *domain = d->domain;
 	struct irq_domain_chip_generic *dgc = domain->gc;
-	struct irq_chip_generic *gc = dgc->gc[0];
+	struct irq_chip_generic *bgc = dgc->gc[0];
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 
-	/* Enable interrupt on AIC5 */
-	irq_gc_lock(gc);
+	/*
+	 * Enable interrupt on AIC5. We always take the lock of the
+	 * first irq chip as all chips share the same registers.
+	 */
+	irq_gc_lock(bgc);
 	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
 	irq_reg_writel(gc, 1, AT91_AIC5_IECR);
 	gc->mask_cache |= d->mask;
-	irq_gc_unlock(gc);
+	irq_gc_unlock(bgc);
 }
 
 static int aic5_retrigger(struct irq_data *d)
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index ed4ca9d..bf9cc5f 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -96,7 +96,7 @@
 static struct armctrl_ic intc __read_mostly;
 static void __exception_irq_entry bcm2835_handle_irq(
 	struct pt_regs *regs);
-static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc);
+static void bcm2836_chained_handle_irq(struct irq_desc *desc);
 
 static void armctrl_mask_irq(struct irq_data *d)
 {
@@ -166,7 +166,7 @@
 			BUG_ON(irq <= 0);
 			irq_set_chip_and_handler(irq, &armctrl_chip,
 				handle_level_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_set_probe(irq);
 		}
 	}
 
@@ -245,7 +245,7 @@
 		handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
 }
 
-static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc)
+static void bcm2836_chained_handle_irq(struct irq_desc *desc)
 {
 	u32 hwirq;
 
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 409bdc6..0fea985 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -115,7 +115,7 @@
 		writel(val, reg);
 }
 
-static void bcm7038_l1_irq_handle(unsigned int irq, struct irq_desc *desc)
+static void bcm7038_l1_irq_handle(struct irq_desc *desc)
 {
 	struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc);
 	struct bcm7038_l1_cpu *cpu;
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index d3f9769..61b18ab 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -56,7 +56,7 @@
 	const __be32 *map_mask_prop;
 };
 
-static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
+static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc)
 {
 	struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc);
 	struct bcm7120_l2_intc_data *b = data->b;
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index aedda06..65cd341 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -49,13 +49,12 @@
 	u32 saved_mask; /* for suspend/resume */
 };
 
-static void brcmstb_l2_intc_irq_handle(unsigned int __irq,
-				       struct irq_desc *desc)
+static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
 {
 	struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
 	struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
-	unsigned int irq = irq_desc_get_irq(desc);
+	unsigned int irq;
 	u32 status;
 
 	chained_irq_enter(chip, desc);
@@ -65,7 +64,7 @@
 
 	if (status == 0) {
 		raw_spin_lock(&desc->lock);
-		handle_bad_irq(irq, desc);
+		handle_bad_irq(desc);
 		raw_spin_unlock(&desc->lock);
 		goto out;
 	}
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index 2dd929e..eb5eb0c 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -132,14 +132,14 @@
 					irq_hw_number_t hw)
 {
 	irq_flow_handler_t handler = handle_level_irq;
-	unsigned int flags = IRQF_VALID | IRQF_PROBE;
+	unsigned int flags = 0;
 
 	if (!clps711x_irqs[hw].flags)
 		return 0;
 
 	if (clps711x_irqs[hw].flags & CLPS711X_FLAG_FIQ) {
 		handler = handle_bad_irq;
-		flags |= IRQF_NOAUTOEN;
+		flags |= IRQ_NOAUTOEN;
 	} else if (clps711x_irqs[hw].eoi) {
 		handler = handle_fasteoi_irq;
 	}
@@ -149,7 +149,7 @@
 		writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hw].eoi);
 
 	irq_set_chip_and_handler(virq, &clps711x_intc_chip, handler);
-	set_irq_flags(virq, flags);
+	irq_modify_status(virq, IRQ_NOPROBE, flags);
 
 	return 0;
 }
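The clps711x conversion shows the full mapping away from the ARM-specific set_irq_flags(): IRQF_VALID is simply the default now, IRQF_PROBE becomes irq_set_probe() (or clearing IRQ_NOPROBE via irq_modify_status()), and IRQF_NOAUTOEN becomes the genirq IRQ_NOAUTOEN status flag. A sketch of a domain map callback using the replacements; the chip name is a placeholder:

static struct irq_chip demo_chip;	/* placeholder irq_chip */

static int demo_irq_map(struct irq_domain *d, unsigned int virq,
			irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &demo_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);

	/* was: set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); */
	irq_set_probe(virq);

	/* for lines that must not be auto-enabled at request time,
	 * was: set_irq_flags(virq, IRQF_VALID | IRQF_NOAUTOEN);
	 * now: irq_set_status_flags(virq, IRQ_NOAUTOEN);
	 */
	return 0;
}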
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index efd95d9..052f266 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -26,7 +26,7 @@
 #define APB_INT_FINALSTATUS_H	0x34
 #define APB_INT_BASE_OFFSET	0x04
 
-static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc)
+static void dw_apb_ictl_handler(struct irq_desc *desc)
 {
 	struct irq_domain *d = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index db04fc1..12985da 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -95,8 +95,8 @@
 	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
 	phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;
 
-	msg->address_hi = (u32) (addr >> 32);
-	msg->address_lo = (u32) (addr);
+	msg->address_hi = upper_32_bits(addr);
+	msg->address_lo = lower_32_bits(addr);
 	msg->data = data->hwirq;
 }
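Besides reading better, upper_32_bits()/lower_32_bits() avoid the undefined ">> 32" on configurations where phys_addr_t is only 32 bits wide; the helpers shift in two 16-bit steps so the expression stays well defined. A standalone illustration, with the macros reproduced here for the example:

#include <stdio.h>
#include <stdint.h>

/* Same trick as the kernel helpers: two 16-bit shifts avoid the
 * undefined ">> 32" when the argument happens to be a 32-bit type. */
#define upper_32_bits(n)	((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n)	((uint32_t)(n))

int main(void)
{
	uint64_t addr = 0x000000082c1f0040ULL;	/* illustrative doorbell address */

	/* Prints "hi=0x00000008 lo=0x2c1f0040". */
	printf("hi=0x%08x lo=0x%08x\n", upper_32_bits(addr), lower_32_bits(addr));
	return 0;
}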
 
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 26b55c5..ac7ae2b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -898,8 +898,10 @@
 			 * non-cacheable as well.
 			 */
 			shr = tmp & GITS_BASER_SHAREABILITY_MASK;
-			if (!shr)
+			if (!shr) {
 				cache = GITS_BASER_nC;
+				__flush_dcache_area(base, alloc_size);
+			}
 			goto retry_baser;
 		}
 
@@ -1140,6 +1142,8 @@
 		return NULL;
 	}
 
+	__flush_dcache_area(itt, sz);
+
 	dev->its = its;
 	dev->itt = itt;
 	dev->nr_ites = nr_ites;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 7deed6e..36ecfc8 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -70,11 +70,6 @@
 	return gic_irq(d) < 32;
 }
 
-static inline bool forwarded_irq(struct irq_data *d)
-{
-	return d->handler_data != NULL;
-}
-
 static inline void __iomem *gic_dist_base(struct irq_data *d)
 {
 	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
@@ -249,7 +244,7 @@
 	 * disabled/masked will not get "stuck", because there is
 	 * no one to deactivate it (guest is being terminated).
 	 */
-	if (forwarded_irq(d))
+	if (irqd_is_forwarded_to_vcpu(d))
 		gic_poke_irq(d, GICD_ICACTIVER);
 }
 
@@ -324,7 +319,7 @@
 	 * No need to deactivate an LPI, or an interrupt that
 	 * is getting forwarded to a vcpu.
 	 */
-	if (gic_irq(d) >= 8192 || forwarded_irq(d))
+	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
 		return;
 	gic_write_dir(gic_irq(d));
 }
@@ -357,7 +352,10 @@
 
 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 {
-	d->handler_data = vcpu;
+	if (vcpu)
+		irqd_set_forwarded_to_vcpu(d);
+	else
+		irqd_clr_forwarded_to_vcpu(d);
 	return 0;
 }
 
@@ -754,13 +752,13 @@
 		irq_set_percpu_devid(irq);
 		irq_domain_set_info(d, irq, hw, chip, d->host_data,
 				    handle_percpu_devid_irq, NULL, NULL);
-		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 	}
 	/* SPIs */
 	if (hw >= 32 && hw < gic_data.irq_nr) {
 		irq_domain_set_info(d, irq, hw, chip, d->host_data,
 				    handle_fasteoi_irq, NULL, NULL);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_set_probe(irq);
 	}
 	/* LPIs */
 	if (hw >= 8192 && hw < GIC_ID_NR) {
@@ -768,7 +766,6 @@
 			return -EPERM;
 		irq_domain_set_info(d, irq, hw, chip, d->host_data,
 				    handle_fasteoi_irq, NULL, NULL);
-		set_irq_flags(irq, IRQF_VALID);
 	}
 
 	return 0;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index e6b7ed5..982c09c 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -145,29 +145,10 @@
 	void *data = irq_data_get_irq_handler_data(d);
 
 	/*
-	 * If handler_data pointing to one of the secondary GICs, then
-	 * this is a cascading interrupt, and it cannot possibly be
-	 * forwarded.
+	 * If handler_data is set, this is a cascading interrupt, and
+	 * it cannot possibly be forwarded.
 	 */
-	if (data >= (void *)(gic_data + 1) &&
-	    data <  (void *)(gic_data + MAX_GIC_NR))
-		return true;
-
-	return false;
-}
-
-static inline bool forwarded_irq(struct irq_data *d)
-{
-	/*
-	 * A forwarded interrupt:
-	 * - is on the primary GIC
-	 * - has its handler_data set to a value
-	 * - that isn't a secondary GIC
-	 */
-	if (d->handler_data && !cascading_gic_irq(d))
-		return true;
-
-	return false;
+	return data != NULL;
 }
 
 /*
@@ -201,7 +182,7 @@
 	 * disabled/masked will not get "stuck", because there is
 	 * no one to deactivate it (guest is being terminated).
 	 */
-	if (forwarded_irq(d))
+	if (irqd_is_forwarded_to_vcpu(d))
 		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
 }
 
@@ -218,7 +199,7 @@
 static void gic_eoimode1_eoi_irq(struct irq_data *d)
 {
 	/* Do not deactivate an IRQ forwarded to a vcpu. */
-	if (forwarded_irq(d))
+	if (irqd_is_forwarded_to_vcpu(d))
 		return;
 
 	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
@@ -296,7 +277,10 @@
 	if (cascading_gic_irq(d))
 		return -EINVAL;
 
-	d->handler_data = vcpu;
+	if (vcpu)
+		irqd_set_forwarded_to_vcpu(d);
+	else
+		irqd_clr_forwarded_to_vcpu(d);
 	return 0;
 }
 
@@ -357,7 +341,7 @@
 	} while (1);
 }
 
-static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+static void gic_handle_cascade_irq(struct irq_desc *desc)
 {
 	struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -376,7 +360,7 @@
 
 	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
 	if (unlikely(gic_irq < 32 || gic_irq > 1020))
-		handle_bad_irq(cascade_irq, desc);
+		handle_bad_irq(desc);
 	else
 		generic_handle_irq(cascade_irq);
 
@@ -906,11 +890,11 @@
 		irq_set_percpu_devid(irq);
 		irq_domain_set_info(d, irq, hw, chip, d->host_data,
 				    handle_percpu_devid_irq, NULL, NULL);
-		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 	} else {
 		irq_domain_set_info(d, irq, hw, chip, d->host_data,
 				    handle_fasteoi_irq, NULL, NULL);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_set_probe(irq);
 	}
 	return 0;
 }
@@ -1119,12 +1103,49 @@
 #ifdef CONFIG_OF
 static int gic_cnt __initdata;
 
+static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
+{
+	struct resource cpuif_res;
+
+	of_address_to_resource(node, 1, &cpuif_res);
+
+	if (!is_hyp_mode_available())
+		return false;
+	if (resource_size(&cpuif_res) < SZ_8K)
+		return false;
+	if (resource_size(&cpuif_res) == SZ_128K) {
+		u32 val_low, val_high;
+
+		/*
+		 * Verify that we have the first 4kB of a GIC400
+		 * aliased over the first 64kB by checking the
+		 * GICC_IIDR register on both ends.
+		 */
+		val_low = readl_relaxed(*base + GIC_CPU_IDENT);
+		val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000);
+		if ((val_low & 0xffff0fff) != 0x0202043B ||
+		    val_low != val_high)
+			return false;
+
+		/*
+		 * Move the base up by 60kB, so that we have an 8kB
+		 * contiguous region, which allows us to use GICC_DIR
+		 * at its normal offset. Please pass me that bucket.
+		 */
+		*base += 0xf000;
+		cpuif_res.start += 0xf000;
+		pr_warn("GIC: Adjusting CPU interface base to %pa",
+			&cpuif_res.start);
+	}
+
+	return true;
+}
+
 static int __init
 gic_of_init(struct device_node *node, struct device_node *parent)
 {
 	void __iomem *cpu_base;
 	void __iomem *dist_base;
-	struct resource cpu_res;
 	u32 percpu_offset;
 	int irq;
 
@@ -1137,14 +1158,11 @@
 	cpu_base = of_iomap(node, 1);
 	WARN(!cpu_base, "unable to map gic cpu registers\n");
 
-	of_address_to_resource(node, 1, &cpu_res);
-
 	/*
 	 * Disable split EOI/Deactivate if either HYP is not available
 	 * or the CPU interface is too small.
 	 */
-	if (gic_cnt == 0 && (!is_hyp_mode_available() ||
-			     resource_size(&cpu_res) < SZ_8K))
+	if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base))
 		static_key_slow_dec(&supports_deactivate);
 
 	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index a0128c7..8f3ca8f 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -307,11 +307,11 @@
 		irq_set_percpu_devid(irq);
 		irq_set_chip_and_handler(irq, &hip04_irq_chip,
 					 handle_percpu_devid_irq);
-		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 	} else {
 		irq_set_chip_and_handler(irq, &hip04_irq_chip,
 					 handle_fasteoi_irq);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_set_probe(irq);
 	}
 	irq_set_chip_data(irq, d->host_data);
 	return 0;
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index 4836102..e484fd2 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -352,7 +352,7 @@
 	__init_i8259_irqs(NULL);
 }
 
-static void i8259_irq_dispatch(unsigned int __irq, struct irq_desc *desc)
+static void i8259_irq_dispatch(struct irq_desc *desc)
 {
 	struct irq_domain *domain = irq_desc_get_handler_data(desc);
 	int hwirq = i8259_irq();
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index 841604b..c02d29c 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -218,7 +218,7 @@
 	return 0;
 }
 
-static void pdc_intc_perip_isr(unsigned int __irq, struct irq_desc *desc)
+static void pdc_intc_perip_isr(struct irq_desc *desc)
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 	struct pdc_intc_priv *priv;
@@ -240,7 +240,7 @@
 	generic_handle_irq(irq_no);
 }
 
-static void pdc_intc_syswake_isr(unsigned int irq, struct irq_desc *desc)
+static void pdc_intc_syswake_isr(struct irq_desc *desc)
 {
 	struct pdc_intc_priv *priv;
 	unsigned int syswake, irq_no;
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index c151726..deb89d6 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -83,7 +83,7 @@
 	/* nothing to do here */
 }
 
-static void keystone_irq_handler(unsigned __irq, struct irq_desc *desc)
+static void keystone_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 	struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
@@ -127,7 +127,7 @@
 
 	irq_set_chip_data(virq, kirq);
 	irq_set_chip_and_handler(virq, &kirq->chip, handle_level_irq);
-	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+	irq_set_probe(virq);
 	return 0;
 }
 
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 5f4c529..8c38b3d 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -446,7 +446,7 @@
  * Whilst using TR2 to detect external interrupts is a software convention it is
  * (hopefully) unlikely to change.
  */
-static void meta_intc_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void meta_intc_irq_demux(struct irq_desc *desc)
 {
 	struct meta_intc_priv *priv = &meta_intc_priv;
 	irq_hw_number_t hw;
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
index 3d23ce3..a5f053b 100644
--- a/drivers/irqchip/irq-metag.c
+++ b/drivers/irqchip/irq-metag.c
@@ -220,7 +220,7 @@
  *	occurred. It is this function's job to demux this irq and
  *	figure out exactly which trigger needs servicing.
  */
-static void metag_internal_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void metag_internal_irq_demux(struct irq_desc *desc)
 {
 	struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc);
 	irq_hw_number_t hw;
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 1764bcf..aeaa061 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -320,6 +320,14 @@
 		intrmask[i] = gic_read(intrmask_reg);
 		pending_reg += gic_reg_step;
 		intrmask_reg += gic_reg_step;
+
+		if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
+			continue;
+
+		pending[i] |= (u64)gic_read(pending_reg) << 32;
+		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
+		pending_reg += gic_reg_step;
+		intrmask_reg += gic_reg_step;
 	}
 
 	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
@@ -426,7 +434,7 @@
 	spin_lock_irqsave(&gic_lock, flags);
 
 	/* Re-route this IRQ */
-	gic_map_to_vpe(irq, cpumask_first(&tmp));
+	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
 
 	/* Update the pcpu_masks */
 	for (i = 0; i < NR_CPUS; i++)
@@ -546,7 +554,7 @@
 	gic_handle_shared_int(false);
 }
 
-static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+static void gic_irq_dispatch(struct irq_desc *desc)
 {
 	gic_handle_local_int(true);
 	gic_handle_shared_int(true);
@@ -599,7 +607,7 @@
 				      GIC_SHARED_TO_HWIRQ(intr));
 	int i;
 
-	gic_map_to_vpe(intr, cpu);
+	gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
 	for (i = 0; i < NR_CPUS; i++)
 		clear_bit(intr, pcpu_masks[i].pcpu_mask);
 	set_bit(intr, pcpu_masks[cpu].pcpu_mask);
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 781ed6e..013fc96 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -129,7 +129,7 @@
 	.irq_unmask	= icu_unmask_irq,
 };
 
-static void icu_mux_irq_demux(unsigned int __irq, struct irq_desc *desc)
+static void icu_mux_irq_demux(struct irq_desc *desc)
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 	struct irq_domain *domain;
@@ -164,7 +164,6 @@
 			      irq_hw_number_t hw)
 {
 	irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
-	set_irq_flags(irq, IRQF_VALID);
 	return 0;
 }
 
@@ -234,7 +233,6 @@
 	for (irq = 0; irq < 64; irq++) {
 		icu_mask_irq(irq_get_irq_data(irq));
 		irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
-		set_irq_flags(irq, IRQF_VALID);
 	}
 	irq_set_default_host(icu_data[0].domain);
 	set_handle_irq(mmp_handle_irq);
@@ -337,7 +335,6 @@
 			irq_set_chip_and_handler(irq, &icu_irq_chip,
 						 handle_level_irq);
 		}
-		set_irq_flags(irq, IRQF_VALID);
 	}
 	irq_set_default_host(icu_data[0].domain);
 	set_handle_irq(mmp2_handle_irq);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 1faf812..604df63 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -84,7 +84,6 @@
 				irq_hw_number_t hw)
 {
 	irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index 5ea999a..be4c5a8 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -106,7 +106,7 @@
 #define ORION_BRIDGE_IRQ_CAUSE	0x00
 #define ORION_BRIDGE_IRQ_MASK	0x04
 
-static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void orion_bridge_irq_handler(struct irq_desc *desc)
 {
 	struct irq_domain *d = irq_desc_get_handler_data(desc);
 
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 0670ab4..9525335 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -283,6 +283,9 @@
 static int intc_irqpin_irq_set_wake(struct irq_data *d, unsigned int on)
 {
 	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+	int hw_irq = irqd_to_hwirq(d);
+
+	irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
 
 	if (!p->clk)
 		return 0;
@@ -332,6 +335,12 @@
 	return status;
 }
 
+/*
+ * This lock class tells lockdep that INTC External IRQ Pin irqs are in a
+ * different category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key intc_irqpin_irq_lock_class;
+
 static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
 				      irq_hw_number_t hw)
 {
@@ -342,8 +351,8 @@
 
 	intc_irqpin_dbg(&p->irq[hw], "map");
 	irq_set_chip_data(virq, h->host_data);
+	irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class);
 	irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
-	set_irq_flags(virq, IRQF_VALID); /* kill me now */
 	return 0;
 }
 
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 2aa3add7..35bf97b 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -121,6 +121,9 @@
 static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
 {
 	struct irqc_priv *p = irq_data_get_irq_chip_data(d);
+	int hw_irq = irqd_to_hwirq(d);
+
+	irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
 
 	if (!p->clk)
 		return 0;
@@ -150,6 +153,12 @@
 	return IRQ_NONE;
 }
 
+/*
+ * This lock class tells lockdep that IRQC irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key irqc_irq_lock_class;
+
 static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq,
 			       irq_hw_number_t hw)
 {
@@ -157,6 +166,7 @@
 
 	irqc_dbg(&p->irq[hw], "map");
 	irq_set_chip_data(virq, h->host_data);
+	irq_set_lockdep_class(virq, &irqc_irq_lock_class);
 	irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
 	return 0;
 }
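Both Renesas drivers gain the same two things: the set_wake callback now forwards the request to the parent interrupt with irq_set_irq_wake(), so the upstream controller keeps the line armed across suspend, and each mapped irq gets its own lockdep class because taking the parent's wake path from inside the child's callback would otherwise look like recursive locking. A stripped-down sketch of such a callback; the struct and field names are hypothetical:

struct demo_irqpin_priv {
	int parent_irq;		/* Linux irq number of the upstream line */
};

static int demo_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct demo_irqpin_priv *p = irq_data_get_irq_chip_data(d);

	/* Propagate the wake request so the parent stays enabled in suspend. */
	irq_set_irq_wake(p->parent_irq, on);

	return 0;
}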
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index 506d9f2..7154b01 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -298,7 +298,7 @@
 	.irq_set_type	= s3c_irqext0_type,
 };
 
-static void s3c_irq_demux(unsigned int __irq, struct irq_desc *desc)
+static void s3c_irq_demux(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc);
@@ -466,13 +466,11 @@
 
 	irq_set_chip_data(virq, irq_data);
 
-	set_irq_flags(virq, IRQF_VALID);
-
 	if (parent_intc && irq_data->type != S3C_IRQTYPE_NONE) {
 		if (irq_data->parent_irq > 31) {
 			pr_err("irq-s3c24xx: parent irq %lu is out of range\n",
 			       irq_data->parent_irq);
-			goto err;
+			return -EINVAL;
 		}
 
 		parent_irq_data = &parent_intc->irqs[irq_data->parent_irq];
@@ -485,18 +483,12 @@
 		if (!irqno) {
 			pr_err("irq-s3c24xx: could not find mapping for parent irq %lu\n",
 			       irq_data->parent_irq);
-			goto err;
+			return -EINVAL;
 		}
 		irq_set_chained_handler(irqno, s3c_irq_demux);
 	}
 
 	return 0;
-
-err:
-	set_irq_flags(virq, 0);
-
-	/* the only error can result from bad mapping data*/
-	return -EINVAL;
 }
 
 static const struct irq_domain_ops s3c24xx_irq_ops = {
@@ -1174,8 +1166,6 @@
 
 	irq_set_chip_data(virq, irq_data);
 
-	set_irq_flags(virq, IRQF_VALID);
-
 	return 0;
 }
 
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 4ad3e7c..0704362 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -83,7 +83,7 @@
 			 irq_hw_number_t hw)
 {
 	irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq);
-	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+	irq_set_probe(virq);
 
 	return 0;
 }
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 772a82c..c143dd5 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -58,7 +58,7 @@
 	return irq_reg_readl(gc, off);
 }
 
-static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc)
+static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
 {
 	struct irq_domain *domain = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index 3318296..848d782 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -97,7 +97,7 @@
 	return IRQ_SET_MASK_OK;
 }
 
-static void tb10x_irq_cascade(unsigned int __irq, struct irq_desc *desc)
+static void tb10x_irq_cascade(struct irq_desc *desc)
 {
 	struct irq_domain *domain = irq_desc_get_handler_data(desc);
 	unsigned int irq = irq_desc_get_irq(desc);
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 16123f6..598ab3f 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -65,19 +65,19 @@
 	writel(mask, f->base + IRQ_ENABLE_SET);
 }
 
-static void fpga_irq_handle(unsigned int __irq, struct irq_desc *desc)
+static void fpga_irq_handle(struct irq_desc *desc)
 {
 	struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
-	unsigned int irq = irq_desc_get_irq(desc);
 	u32 status = readl(f->base + IRQ_STATUS);
 
 	if (status == 0) {
-		do_bad_IRQ(irq, desc);
+		do_bad_IRQ(desc);
 		return;
 	}
 
 	do {
-		irq = ffs(status) - 1;
+		unsigned int irq = ffs(status) - 1;
+
 		status &= ~(1 << irq);
 		generic_handle_irq(irq_find_mapping(f->domain, irq));
 	} while (status);
@@ -128,7 +128,7 @@
 	irq_set_chip_data(irq, f);
 	irq_set_chip_and_handler(irq, &f->chip,
 				handle_level_irq);
-	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	irq_set_probe(irq);
 	return 0;
 }
 
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 03846df..b956dff 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -201,7 +201,7 @@
 		return -EPERM;
 	irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
 	irq_set_chip_data(irq, v->base);
-	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	irq_set_probe(irq);
 	return 0;
 }
 
@@ -225,7 +225,7 @@
 	return handled;
 }
 
-static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc)
+static void vic_handle_irq_cascaded(struct irq_desc *desc)
 {
 	u32 stat, hwirq;
 	struct irq_chip *host_chip = irq_desc_get_chip(desc);
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
index 8371d99..f9af0af 100644
--- a/drivers/irqchip/irq-vt8500.c
+++ b/drivers/irqchip/irq-vt8500.c
@@ -167,7 +167,6 @@
 							irq_hw_number_t hw)
 {
 	irq_set_chip_and_handler(virq, &vt8500_irq_chip, handle_level_irq);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 4cbd9c5..1ccd2ab 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -182,7 +182,7 @@
 	&spear320_shirq_intrcomm_ras,
 };
 
-static void shirq_handler(unsigned __irq, struct irq_desc *desc)
+static void shirq_handler(struct irq_desc *desc)
 {
 	struct spear_shirq *shirq = irq_desc_get_handler_data(desc);
 	u32 pend;
@@ -211,7 +211,6 @@
 	for (i = 0; i < shirq->nr_irqs; i++) {
 		irq_set_chip_and_handler(shirq->virq_base + i,
 					 shirq->irq_chip, handle_simple_irq);
-		set_irq_flags(shirq->virq_base + i, IRQF_VALID);
 		irq_set_chip_data(shirq->virq_base + i, shirq);
 	}
 }
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 0e5d673..9600cd7 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -646,14 +646,14 @@
 
 		f1 = Read_hfc8_stable(l1p->hw, A_F1);
 		f2 = Read_hfc8(l1p->hw, A_F2);
-		df = f1 - f2;
-		if ((f1 - f2) < 0)
-			df = f1 - f2 + MAX_F_CNT + 1;
 
+		if (f1 < f2)
+			df = MAX_F_CNT + 1 + f1 - f2;
+		else
+			df = f1 - f2;
 
-		if (!df) {
+		if (!df)
 			return;	/* no complete frame in fifo */
-		}
 
 		z1 = Read_hfc16_stable(l1p->hw, A_Z1);
 		z2 = Read_hfc16(l1p->hw, A_Z2);
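The rewritten F-counter arithmetic avoids the earlier signed intermediate: when f1 has wrapped around below f2, the number of pending frames is the distance taken modulo MAX_F_CNT + 1. A standalone check of the wraparound case; the counter width used here is illustrative:

#include <stdio.h>

#define MAX_F_CNT 0x0f	/* illustrative counter maximum */

/* Complete frames between counters f2 and f1, with wraparound at MAX_F_CNT. */
static unsigned int frames_pending(unsigned int f1, unsigned int f2)
{
	if (f1 < f2)
		return MAX_F_CNT + 1 + f1 - f2;
	return f1 - f2;
}

int main(void)
{
	printf("%u\n", frames_pending(5, 3));	/* 2 */
	printf("%u\n", frames_pending(1, 14));	/* 3: f1 wrapped past MAX_F_CNT */
	return 0;
}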
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 70f4255..42990f2 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -170,6 +170,7 @@
 
 config LEDS_IPAQ_MICRO
 	tristate "LED Support for the Compaq iPAQ h3xxx"
+	depends on LEDS_CLASS
 	depends on MFD_IPAQ_MICRO
 	help
 	  Choose this option if you want to use the notification LED on
@@ -229,7 +230,7 @@
 	tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501"
 	depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501
 	select FW_LOADER
-	select FW_LOADER_USER_HELPER_FALLBACK
+	select FW_LOADER_USER_HELPER
 	help
 	  This option supports common operations for LP5521/5523/55231/5562/8501
 	  devices.
diff --git a/drivers/leds/leds-aat1290.c b/drivers/leds/leds-aat1290.c
index fd7c25f..ac77d36 100644
--- a/drivers/leds/leds-aat1290.c
+++ b/drivers/leds/leds-aat1290.c
@@ -331,7 +331,7 @@
 	cfg->max_brightness = b + 1;
 }
 
-int init_mm_current_scale(struct aat1290_led *led,
+static int init_mm_current_scale(struct aat1290_led *led,
 			struct aat1290_led_config_data *cfg)
 {
 	int max_mm_current_percent[] = { 20, 22, 25, 28, 32, 36, 40, 45, 50, 56,
@@ -559,6 +559,7 @@
 	{ .compatible = "skyworks,aat1290" },
 	{},
 };
+MODULE_DEVICE_TABLE(of, aat1290_led_dt_match);
 
 static struct platform_driver aat1290_led_driver = {
 	.probe		= aat1290_led_probe,
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 986fe1e..1793727 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -395,6 +395,7 @@
 	{ .compatible = "brcm,bcm6328-leds", },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, bcm6328_leds_of_match);
 
 static struct platform_driver bcm6328_leds_driver = {
 	.probe = bcm6328_leds_probe,
diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c
index 21f9693..7ea3526 100644
--- a/drivers/leds/leds-bcm6358.c
+++ b/drivers/leds/leds-bcm6358.c
@@ -226,6 +226,7 @@
 	{ .compatible = "brcm,bcm6358-leds", },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, bcm6358_leds_of_match);
 
 static struct platform_driver bcm6358_leds_driver = {
 	.probe = bcm6358_leds_probe,
diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
index 2ae8c4d..feca07b 100644
--- a/drivers/leds/leds-ktd2692.c
+++ b/drivers/leds/leds-ktd2692.c
@@ -426,6 +426,7 @@
 	{ .compatible = "kinetic,ktd2692", },
 	{ /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, ktd2692_match);
 
 static struct platform_driver ktd2692_driver = {
 	.driver = {
diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c
index df348a0..afbb140 100644
--- a/drivers/leds/leds-max77693.c
+++ b/drivers/leds/leds-max77693.c
@@ -1080,6 +1080,7 @@
 	{ .compatible = "maxim,max77693-led" },
 	{},
 };
+MODULE_DEVICE_TABLE(of, max77693_led_dt_match);
 
 static struct platform_driver max77693_led_driver = {
 	.probe		= max77693_led_probe,
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index b33514d..a95a612 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -337,6 +337,7 @@
 	{ .compatible = "lacie,ns2-leds", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, of_ns2_leds_match);
 #endif /* CONFIG_OF_GPIO */
 
 struct ns2_led_priv {
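The LED driver changes in this group are all the same one-liner: adding MODULE_DEVICE_TABLE(of, ...) so the OF match table is exported as module alias information and the driver can be autoloaded when a matching devicetree node appears. The minimal pattern, with a hypothetical compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct of_device_id demo_leds_of_match[] = {
	{ .compatible = "vendor,demo-leds" },	/* hypothetical */
	{ /* sentinel */ },
};
/* Exports the table as module alias information for autoloading. */
MODULE_DEVICE_TABLE(of, demo_leds_of_match);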
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index d5415ee..3e01e6f 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -393,7 +393,7 @@
 	# of SCSI_DH if the latter isn't defined but if
 	# it is, DM_MULTIPATH must depend on it.  We get a build
 	# error if SCSI_DH=m and DM_MULTIPATH=y
-	depends on SCSI_DH || !SCSI_DH
+	depends on !SCSI_DH || SCSI
 	---help---
 	  Allow volume managers to support multipath hardware.
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d60c88d..4b3b6f8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -968,7 +968,8 @@
 
 /*
  * Generate a new unfragmented bio with the given size
- * This should never violate the device limitations
+ * This should never violate the device limitations (but only because
+ * max_segment_size is being constrained to PAGE_SIZE).
  *
  * This function may be called concurrently. If we allocate from the mempool
  * concurrently, there is a possibility of deadlock. For example, if we have
@@ -2045,9 +2046,20 @@
 	return fn(ti, cc->dev, cc->start, ti->len, data);
 }
 
+static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+	/*
+	 * Unfortunate constraint that is required to avoid the potential
+	 * for exceeding underlying device's max_segments limits -- due to
+	 * crypt_alloc_buffer() possibly allocating pages for the encryption
+	 * bio that are not as physically contiguous as the original bio.
+	 */
+	limits->max_segment_size = PAGE_SIZE;
+}
+
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 14, 0},
+	.version = {1, 14, 1},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
@@ -2058,6 +2070,7 @@
 	.resume = crypt_resume,
 	.message = crypt_message,
 	.iterate_devices = crypt_iterate_devices,
+	.io_hints = crypt_io_hints,
 };
 
 static int __init dm_crypt_init(void)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index eff7bdd..5a67671 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -159,12 +159,9 @@
 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 {
 	struct pgpath *pgpath, *tmp;
-	struct multipath *m = ti->private;
 
 	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 		list_del(&pgpath->list);
-		if (m->hw_handler_name)
-			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
 		dm_put_device(ti, pgpath->path.dev);
 		free_pgpath(pgpath);
 	}
@@ -580,6 +577,7 @@
 		q = bdev_get_queue(p->path.dev->bdev);
 
 	if (m->retain_attached_hw_handler) {
+retain:
 		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 		if (attached_handler_name) {
 			/*
@@ -599,20 +597,14 @@
 	}
 
 	if (m->hw_handler_name) {
-		/*
-		 * Increments scsi_dh reference, even when using an
-		 * already-attached handler.
-		 */
 		r = scsi_dh_attach(q, m->hw_handler_name);
 		if (r == -EBUSY) {
-			/*
-			 * Already attached to different hw_handler:
-			 * try to reattach with correct one.
-			 */
-			scsi_dh_detach(q);
-			r = scsi_dh_attach(q, m->hw_handler_name);
-		}
+			char b[BDEVNAME_SIZE];
 
+			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
+				bdevname(p->path.dev->bdev, b));
+			goto retain;
+		}
 		if (r < 0) {
 			ti->error = "error attaching hardware handler";
 			dm_put_device(ti, p->path.dev);
@@ -624,7 +616,6 @@
 			if (r < 0) {
 				ti->error = "unable to set hardware "
 							"handler parameters";
-				scsi_dh_detach(q);
 				dm_put_device(ti, p->path.dev);
 				goto bad;
 			}
@@ -734,12 +725,6 @@
 		return 0;
 
 	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
-	if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
-				     "scsi_dh_%s", m->hw_handler_name)) {
-		ti->error = "unknown hardware handler type";
-		ret = -EINVAL;
-		goto fail;
-	}
 
 	if (hw_argc > 1) {
 		char *p;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 6578b7b..6fcbfb0 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4249,6 +4249,10 @@
 {
 	struct thin_c *tc = ti->private;
 	struct pool *pool = tc->pool;
+	struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
+
+	if (!pool_limits->discard_granularity)
+		return; /* pool's discard support is disabled */
 
 	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
 	limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index dc2aaab..217d613 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -10,6 +10,7 @@
 	select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS
 	select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
 	select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB
+	select FRAME_VECTOR
 	default n
 	---help---
 	  V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index de2474e..70c28d1 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -195,46 +195,34 @@
 }
 
 /*
- * omap_vout_uservirt_to_phys: This inline function is used to convert user
- * space virtual address to physical address.
+ * omap_vout_get_userptr: Convert user space virtual address to physical
+ * address.
  */
-static unsigned long omap_vout_uservirt_to_phys(unsigned long virtp)
+static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
+				 u32 *physp)
 {
-	unsigned long physp = 0;
-	struct vm_area_struct *vma;
-	struct mm_struct *mm = current->mm;
+	struct frame_vector *vec;
+	int ret;
 
 	/* For kernel direct-mapped memory, take the easy way */
-	if (virtp >= PAGE_OFFSET)
-		return virt_to_phys((void *) virtp);
-
-	down_read(&current->mm->mmap_sem);
-	vma = find_vma(mm, virtp);
-	if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
-		/* this will catch, kernel-allocated, mmaped-to-usermode
-		   addresses */
-		physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
-		up_read(&current->mm->mmap_sem);
-	} else {
-		/* otherwise, use get_user_pages() for general userland pages */
-		int res, nr_pages = 1;
-		struct page *pages;
-
-		res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
-				0, &pages, NULL);
-		up_read(&current->mm->mmap_sem);
-
-		if (res == nr_pages) {
-			physp =  __pa(page_address(&pages[0]) +
-					(virtp & ~PAGE_MASK));
-		} else {
-			printk(KERN_WARNING VOUT_NAME
-					"get_user_pages failed\n");
-			return 0;
-		}
+	if (virtp >= PAGE_OFFSET) {
+		*physp = virt_to_phys((void *)virtp);
+		return 0;
 	}
 
-	return physp;
+	vec = frame_vector_create(1);
+	if (!vec)
+		return -ENOMEM;
+
+	ret = get_vaddr_frames(virtp, 1, true, false, vec);
+	if (ret != 1) {
+		frame_vector_destroy(vec);
+		return -EINVAL;
+	}
+	*physp = __pfn_to_phys(frame_vector_pfns(vec)[0]);
+	vb->priv = vec;
+
+	return 0;
 }
 
 /*
@@ -784,11 +772,15 @@
 	 * address of the buffer
 	 */
 	if (V4L2_MEMORY_USERPTR == vb->memory) {
+		int ret;
+
 		if (0 == vb->baddr)
 			return -EINVAL;
 		/* Physical address */
-		vout->queued_buf_addr[vb->i] = (u8 *)
-			omap_vout_uservirt_to_phys(vb->baddr);
+		ret = omap_vout_get_userptr(vb, vb->baddr,
+				(u32 *)&vout->queued_buf_addr[vb->i]);
+		if (ret < 0)
+			return ret;
 	} else {
 		unsigned long addr, dma_addr;
 		unsigned long size;
@@ -834,12 +826,13 @@
 static void omap_vout_buffer_release(struct videobuf_queue *q,
 			    struct videobuf_buffer *vb)
 {
-	struct omap_vout_device *vout = q->priv_data;
-
 	vb->state = VIDEOBUF_NEEDS_INIT;
+	if (vb->memory == V4L2_MEMORY_USERPTR && vb->priv) {
+		struct frame_vector *vec = vb->priv;
 
-	if (V4L2_MEMORY_MMAP != vout->memory)
-		return;
+		put_vaddr_frames(vec);
+		frame_vector_destroy(vec);
+	}
 }
 
 /*
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index b4b0229..82876a6 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -84,6 +84,7 @@
 
 config VIDEOBUF2_MEMOPS
 	tristate
+	select FRAME_VECTOR
 
 config VIDEOBUF2_DMA_CONTIG
 	tristate
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index f1022d8..4f59b7e 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1691,9 +1691,7 @@
 		ret = __qbuf_mmap(vb, b);
 		break;
 	case V4L2_MEMORY_USERPTR:
-		down_read(&current->mm->mmap_sem);
 		ret = __qbuf_userptr(vb, b);
-		up_read(&current->mm->mmap_sem);
 		break;
 	case V4L2_MEMORY_DMABUF:
 		ret = __qbuf_dmabuf(vb, b);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 94c1e64..2397ceb 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -32,15 +32,13 @@
 	dma_addr_t			dma_addr;
 	enum dma_data_direction		dma_dir;
 	struct sg_table			*dma_sgt;
+	struct frame_vector		*vec;
 
 	/* MMAP related */
 	struct vb2_vmarea_handler	handler;
 	atomic_t			refcount;
 	struct sg_table			*sgt_base;
 
-	/* USERPTR related */
-	struct vm_area_struct		*vma;
-
 	/* DMABUF related */
 	struct dma_buf_attachment	*db_attach;
 };
@@ -49,24 +47,6 @@
 /*        scatterlist table functions        */
 /*********************************************/
 
-
-static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
-	void (*cb)(struct page *pg))
-{
-	struct scatterlist *s;
-	unsigned int i;
-
-	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
-		struct page *page = sg_page(s);
-		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
-			>> PAGE_SHIFT;
-		unsigned int j;
-
-		for (j = 0; j < n_pages; ++j, ++page)
-			cb(page);
-	}
-}
-
 static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
 {
 	struct scatterlist *s;
@@ -429,92 +409,12 @@
 /*       callbacks for USERPTR buffers       */
 /*********************************************/
 
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
-	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
-static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
-	struct vm_area_struct *vma, unsigned long *res)
-{
-	unsigned long pfn, start_pfn, prev_pfn;
-	unsigned int i;
-	int ret;
-
-	if (!vma_is_io(vma))
-		return -EFAULT;
-
-	ret = follow_pfn(vma, start, &pfn);
-	if (ret)
-		return ret;
-
-	start_pfn = pfn;
-	start += PAGE_SIZE;
-
-	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
-		prev_pfn = pfn;
-		ret = follow_pfn(vma, start, &pfn);
-
-		if (ret) {
-			pr_err("no page for address %lu\n", start);
-			return ret;
-		}
-		if (pfn != prev_pfn + 1)
-			return -EINVAL;
-	}
-
-	*res = start_pfn;
-	return 0;
-}
-
-static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
-	int n_pages, struct vm_area_struct *vma,
-	enum dma_data_direction dma_dir)
-{
-	if (vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
-			unsigned long pfn;
-			int ret = follow_pfn(vma, start, &pfn);
-
-			if (!pfn_valid(pfn))
-				return -EINVAL;
-
-			if (ret) {
-				pr_err("no page for address %lu\n", start);
-				return ret;
-			}
-			pages[i] = pfn_to_page(pfn);
-		}
-	} else {
-		int n;
-
-		n = get_user_pages(current, current->mm, start & PAGE_MASK,
-			n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
-		/* negative error means that no page was pinned */
-		n = max(n, 0);
-		if (n != n_pages) {
-			pr_err("got only %d of %d user pages\n", n, n_pages);
-			while (n)
-				put_page(pages[--n]);
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static void vb2_dc_put_dirty_page(struct page *page)
-{
-	set_page_dirty_lock(page);
-	put_page(page);
-}
-
 static void vb2_dc_put_userptr(void *buf_priv)
 {
 	struct vb2_dc_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
+	int i;
+	struct page **pages;
 
 	if (sgt) {
 		DEFINE_DMA_ATTRS(attrs);
@@ -526,13 +426,15 @@
 		 */
 		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
 				   buf->dma_dir, &attrs);
-		if (!vma_is_io(buf->vma))
-			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
-
+		pages = frame_vector_pages(buf->vec);
+		/* sgt should exist only if vector contains pages... */
+		BUG_ON(IS_ERR(pages));
+		for (i = 0; i < frame_vector_count(buf->vec); i++)
+			set_page_dirty_lock(pages[i]);
 		sg_free_table(sgt);
 		kfree(sgt);
 	}
-	vb2_put_vma(buf->vma);
+	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }
 
@@ -572,13 +474,10 @@
 {
 	struct vb2_dc_conf *conf = alloc_ctx;
 	struct vb2_dc_buf *buf;
-	unsigned long start;
-	unsigned long end;
+	struct frame_vector *vec;
 	unsigned long offset;
-	struct page **pages;
-	int n_pages;
+	int n_pages, i;
 	int ret = 0;
-	struct vm_area_struct *vma;
 	struct sg_table *sgt;
 	unsigned long contig_size;
 	unsigned long dma_align = dma_get_cache_alignment();
@@ -604,72 +503,43 @@
 	buf->dev = conf->dev;
 	buf->dma_dir = dma_dir;
 
-	start = vaddr & PAGE_MASK;
 	offset = vaddr & ~PAGE_MASK;
-	end = PAGE_ALIGN(vaddr + size);
-	n_pages = (end - start) >> PAGE_SHIFT;
-
-	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
-	if (!pages) {
-		ret = -ENOMEM;
-		pr_err("failed to allocate pages table\n");
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec)) {
+		ret = PTR_ERR(vec);
 		goto fail_buf;
 	}
+	buf->vec = vec;
+	n_pages = frame_vector_count(vec);
+	ret = frame_vector_to_pages(vec);
+	if (ret < 0) {
+		unsigned long *nums = frame_vector_pfns(vec);
 
-	/* current->mm->mmap_sem is taken by videobuf2 core */
-	vma = find_vma(current->mm, vaddr);
-	if (!vma) {
-		pr_err("no vma for address %lu\n", vaddr);
-		ret = -EFAULT;
-		goto fail_pages;
-	}
-
-	if (vma->vm_end < vaddr + size) {
-		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
-		ret = -EFAULT;
-		goto fail_pages;
-	}
-
-	buf->vma = vb2_get_vma(vma);
-	if (!buf->vma) {
-		pr_err("failed to copy vma\n");
-		ret = -ENOMEM;
-		goto fail_pages;
-	}
-
-	/* extract page list from userspace mapping */
-	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
-	if (ret) {
-		unsigned long pfn;
-		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
-			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
-			buf->size = size;
-			kfree(pages);
-			return buf;
-		}
-
-		pr_err("failed to get user pages\n");
-		goto fail_vma;
+		/*
+		 * Failed to convert to pages... Check the memory is physically
+		 * contiguous and use direct mapping
+		 */
+		for (i = 1; i < n_pages; i++)
+			if (nums[i-1] + 1 != nums[i])
+				goto fail_pfnvec;
+		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
+		goto out;
 	}
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt) {
 		pr_err("failed to allocate sg table\n");
 		ret = -ENOMEM;
-		goto fail_get_user_pages;
+		goto fail_pfnvec;
 	}
 
-	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
+	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
 		offset, size, GFP_KERNEL);
 	if (ret) {
 		pr_err("failed to initialize sg table\n");
 		goto fail_sgt;
 	}
 
-	/* pages are no longer needed */
-	kfree(pages);
-	pages = NULL;
-
 	/*
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
@@ -691,8 +561,9 @@
 	}
 
 	buf->dma_addr = sg_dma_address(sgt->sgl);
-	buf->size = size;
 	buf->dma_sgt = sgt;
+out:
+	buf->size = size;
 
 	return buf;
 
@@ -701,23 +572,13 @@
 			   buf->dma_dir, &attrs);
 
 fail_sgt_init:
-	if (!vma_is_io(buf->vma))
-		vb2_dc_sgt_foreach_page(sgt, put_page);
 	sg_free_table(sgt);
 
 fail_sgt:
 	kfree(sgt);
 
-fail_get_user_pages:
-	if (pages && !vma_is_io(buf->vma))
-		while (n_pages)
-			put_page(pages[--n_pages]);
-
-fail_vma:
-	vb2_put_vma(buf->vma);
-
-fail_pages:
-	kfree(pages); /* kfree is NULL-proof */
+fail_pfnvec:
+	vb2_destroy_framevec(vec);
 
 fail_buf:
 	kfree(buf);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 7289b81..be7bd65 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -38,6 +38,7 @@
 	struct device			*dev;
 	void				*vaddr;
 	struct page			**pages;
+	struct frame_vector		*vec;
 	int				offset;
 	enum dma_data_direction		dma_dir;
 	struct sg_table			sg_table;
@@ -51,7 +52,6 @@
 	unsigned int			num_pages;
 	atomic_t			refcount;
 	struct vb2_vmarea_handler	handler;
-	struct vm_area_struct		*vma;
 
 	struct dma_buf_attachment	*db_attach;
 };
@@ -225,25 +225,17 @@
 	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 }
 
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
-	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 				    unsigned long size,
 				    enum dma_data_direction dma_dir)
 {
 	struct vb2_dma_sg_conf *conf = alloc_ctx;
 	struct vb2_dma_sg_buf *buf;
-	unsigned long first, last;
-	int num_pages_from_user;
-	struct vm_area_struct *vma;
 	struct sg_table *sgt;
 	DEFINE_DMA_ATTRS(attrs);
+	struct frame_vector *vec;
 
 	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
-
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return NULL;
@@ -254,61 +246,19 @@
 	buf->offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
 	buf->dma_sgt = &buf->sg_table;
+	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec))
+		goto userptr_fail_pfnvec;
+	buf->vec = vec;
 
-	first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
-	last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
-	buf->num_pages = last - first + 1;
-
-	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
-			     GFP_KERNEL);
-	if (!buf->pages)
-		goto userptr_fail_alloc_pages;
-
-	vma = find_vma(current->mm, vaddr);
-	if (!vma) {
-		dprintk(1, "no vma for address %lu\n", vaddr);
-		goto userptr_fail_find_vma;
-	}
-
-	if (vma->vm_end < vaddr + size) {
-		dprintk(1, "vma at %lu is too small for %lu bytes\n",
-			vaddr, size);
-		goto userptr_fail_find_vma;
-	}
-
-	buf->vma = vb2_get_vma(vma);
-	if (!buf->vma) {
-		dprintk(1, "failed to copy vma\n");
-		goto userptr_fail_find_vma;
-	}
-
-	if (vma_is_io(buf->vma)) {
-		for (num_pages_from_user = 0;
-		     num_pages_from_user < buf->num_pages;
-		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
-			unsigned long pfn;
-
-			if (follow_pfn(vma, vaddr, &pfn)) {
-				dprintk(1, "no page for address %lu\n", vaddr);
-				break;
-			}
-			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
-		}
-	} else
-		num_pages_from_user = get_user_pages(current, current->mm,
-					     vaddr & PAGE_MASK,
-					     buf->num_pages,
-					     buf->dma_dir == DMA_FROM_DEVICE,
-					     1, /* force */
-					     buf->pages,
-					     NULL);
-
-	if (num_pages_from_user != buf->num_pages)
-		goto userptr_fail_get_user_pages;
+	buf->pages = frame_vector_pages(vec);
+	if (IS_ERR(buf->pages))
+		goto userptr_fail_sgtable;
+	buf->num_pages = frame_vector_count(vec);
 
 	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
 			buf->num_pages, buf->offset, size, 0))
-		goto userptr_fail_alloc_table_from_pages;
+		goto userptr_fail_sgtable;
 
 	sgt = &buf->sg_table;
 	/*
@@ -324,17 +274,9 @@
 
 userptr_fail_map:
 	sg_free_table(&buf->sg_table);
-userptr_fail_alloc_table_from_pages:
-userptr_fail_get_user_pages:
-	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
-		buf->num_pages, num_pages_from_user);
-	if (!vma_is_io(buf->vma))
-		while (--num_pages_from_user >= 0)
-			put_page(buf->pages[num_pages_from_user]);
-	vb2_put_vma(buf->vma);
-userptr_fail_find_vma:
-	kfree(buf->pages);
-userptr_fail_alloc_pages:
+userptr_fail_sgtable:
+	vb2_destroy_framevec(vec);
+userptr_fail_pfnvec:
 	kfree(buf);
 	return NULL;
 }
@@ -362,11 +304,8 @@
 	while (--i >= 0) {
 		if (buf->dma_dir == DMA_FROM_DEVICE)
 			set_page_dirty_lock(buf->pages[i]);
-		if (!vma_is_io(buf->vma))
-			put_page(buf->pages[i]);
 	}
-	kfree(buf->pages);
-	vb2_put_vma(buf->vma);
+	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }
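All of the USERPTR handling above (omap_vout and both videobuf2 allocators) moves to the new frame_vector API instead of open-coding find_vma()/follow_pfn()/get_user_pages(); that is also why videobuf2-core no longer takes mmap_sem around __qbuf_userptr(), since get_vaddr_frames() acquires it itself. A minimal sketch of the lifecycle, independent of videobuf2; the function name and the simplified error handling are illustrative:

#include <linux/mm.h>

/* Pin 'nr' user pages starting at 'start', then release them again. */
static int demo_pin_user_range(unsigned long start, unsigned int nr, bool write)
{
	struct frame_vector *vec;
	int ret;

	vec = frame_vector_create(nr);
	if (!vec)
		return -ENOMEM;

	ret = get_vaddr_frames(start, nr, write, false, vec);
	if (ret < 0)
		goto out_destroy;

	if (frame_vector_to_pages(vec) >= 0) {
		/* Normal memory: frame_vector_pages(vec) gives struct page
		 * pointers, e.g. for sg_alloc_table_from_pages() and later
		 * set_page_dirty_lock().
		 */
	} else {
		/* VM_IO/VM_PFNMAP mapping: only frame_vector_pfns(vec) is
		 * usable, typically after checking the pfns are contiguous.
		 */
	}

	put_vaddr_frames(vec);		/* drops any page references taken */
	ret = 0;
out_destroy:
	frame_vector_destroy(vec);
	return ret;
}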
 
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 0d49b79..48c6a49 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -23,118 +23,62 @@
 #include <media/videobuf2-memops.h>
 
 /**
- * vb2_get_vma() - acquire and lock the virtual memory area
- * @vma:	given virtual memory area
+ * vb2_create_framevec() - map virtual addresses to pfns
+ * @start:	Virtual user address where we start mapping
+ * @length:	Length of a range to map
+ * @write:	Should we map for writing into the area
  *
- * This function attempts to acquire an area mapped in the userspace for
- * the duration of a hardware operation. The area is "locked" by performing
- * the same set of operation that are done when process calls fork() and
- * memory areas are duplicated.
- *
- * Returns a copy of a virtual memory region on success or NULL.
+ * This function allocates and fills in a vector with pfns corresponding to
+ * the virtual address range passed in its arguments. If the pfns have
+ * corresponding pages, page references are also grabbed to pin the pages in
+ * memory. The function returns a pointer to the vector on success and an
+ * error pointer on failure. Free the returned vector via vb2_destroy_framevec().
  */
-struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
+struct frame_vector *vb2_create_framevec(unsigned long start,
+					 unsigned long length,
+					 bool write)
 {
-	struct vm_area_struct *vma_copy;
+	int ret;
+	unsigned long first, last;
+	unsigned long nr;
+	struct frame_vector *vec;
 
-	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
-	if (vma_copy == NULL)
-		return NULL;
-
-	if (vma->vm_ops && vma->vm_ops->open)
-		vma->vm_ops->open(vma);
-
-	if (vma->vm_file)
-		get_file(vma->vm_file);
-
-	memcpy(vma_copy, vma, sizeof(*vma));
-
-	vma_copy->vm_mm = NULL;
-	vma_copy->vm_next = NULL;
-	vma_copy->vm_prev = NULL;
-
-	return vma_copy;
-}
-EXPORT_SYMBOL_GPL(vb2_get_vma);
-
-/**
- * vb2_put_userptr() - release a userspace virtual memory area
- * @vma:	virtual memory region associated with the area to be released
- *
- * This function releases the previously acquired memory area after a hardware
- * operation.
- */
-void vb2_put_vma(struct vm_area_struct *vma)
-{
-	if (!vma)
-		return;
-
-	if (vma->vm_ops && vma->vm_ops->close)
-		vma->vm_ops->close(vma);
-
-	if (vma->vm_file)
-		fput(vma->vm_file);
-
-	kfree(vma);
-}
-EXPORT_SYMBOL_GPL(vb2_put_vma);
-
-/**
- * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
- * @vaddr:	starting virtual address of the area to be verified
- * @size:	size of the area
- * @res_paddr:	will return physical address for the given vaddr
- * @res_vma:	will return locked copy of struct vm_area for the given area
- *
- * This function will go through memory area of size @size mapped at @vaddr and
- * verify that the underlying physical pages are contiguous. If they are
- * contiguous the virtual memory area is locked and a @res_vma is filled with
- * the copy and @res_pa set to the physical address of the buffer.
- *
- * Returns 0 on success.
- */
-int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
-			   struct vm_area_struct **res_vma, dma_addr_t *res_pa)
-{
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long offset, start, end;
-	unsigned long this_pfn, prev_pfn;
-	dma_addr_t pa = 0;
-
-	start = vaddr;
-	offset = start & ~PAGE_MASK;
-	end = start + size;
-
-	vma = find_vma(mm, start);
-
-	if (vma == NULL || vma->vm_end < end)
-		return -EFAULT;
-
-	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
-		int ret = follow_pfn(vma, start, &this_pfn);
-		if (ret)
-			return ret;
-
-		if (prev_pfn == 0)
-			pa = this_pfn << PAGE_SHIFT;
-		else if (this_pfn != prev_pfn + 1)
-			return -EFAULT;
-
-		prev_pfn = this_pfn;
+	first = start >> PAGE_SHIFT;
+	last = (start + length - 1) >> PAGE_SHIFT;
+	nr = last - first + 1;
+	vec = frame_vector_create(nr);
+	if (!vec)
+		return ERR_PTR(-ENOMEM);
+	ret = get_vaddr_frames(start, nr, write, 1, vec);
+	if (ret < 0)
+		goto out_destroy;
+	/* We accept only a complete set of PFNs */
+	if (ret != nr) {
+		ret = -EFAULT;
+		goto out_release;
 	}
-
-	/*
-	 * Memory is contiguous, lock vma and return to the caller
-	 */
-	*res_vma = vb2_get_vma(vma);
-	if (*res_vma == NULL)
-		return -ENOMEM;
-
-	*res_pa = pa + offset;
-	return 0;
+	return vec;
+out_release:
+	put_vaddr_frames(vec);
+out_destroy:
+	frame_vector_destroy(vec);
+	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
+EXPORT_SYMBOL(vb2_create_framevec);
+
+/**
+ * vb2_destroy_framevec() - release vector of mapped pfns
+ * @vec:	vector of pfns / pages to release
+ *
+ * This releases references to all pages in the vector @vec (if corresponding
+ * pfns are backed by pages) and frees the passed vector.
+ */
+void vb2_destroy_framevec(struct frame_vector *vec)
+{
+	put_vaddr_frames(vec);
+	frame_vector_destroy(vec);
+}
+EXPORT_SYMBOL(vb2_destroy_framevec);
 
 /**
  * vb2_common_vm_open() - increase refcount of the vma
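
Both converted backends now funnel their userptr handling through the two
helpers documented above. For reference, a minimal sketch of the resulting
acquire/release pattern; the demo_* names and structure are illustrative
assumptions, only the vb2_*framevec() and frame_vector_*() calls come from
this series:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/dma-direction.h>
#include <media/videobuf2-memops.h>

struct demo_buf {
	struct frame_vector	*vec;
	struct page		**pages;
	unsigned int		num_pages;
};

static int demo_get_userptr(struct demo_buf *buf, unsigned long vaddr,
			    unsigned long size, enum dma_data_direction dir)
{
	struct frame_vector *vec;

	/* pin the user range; write == true when the device writes into it */
	vec = vb2_create_framevec(vaddr, size, dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec))
		return PTR_ERR(vec);
	buf->vec = vec;

	/* ERR_PTR if the range is pfn-mapped and has no struct pages */
	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages)) {
		vb2_destroy_framevec(vec);
		return PTR_ERR(buf->pages);
	}
	buf->num_pages = frame_vector_count(vec);
	return 0;
}

static void demo_put_userptr(struct demo_buf *buf)
{
	/* drops the page references (if any) and frees the vector */
	vb2_destroy_framevec(buf->vec);
}
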
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 2fe4c27..ecb8f0c 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -23,11 +23,9 @@
 
 struct vb2_vmalloc_buf {
 	void				*vaddr;
-	struct page			**pages;
-	struct vm_area_struct		*vma;
+	struct frame_vector		*vec;
 	enum dma_data_direction		dma_dir;
 	unsigned long			size;
-	unsigned int			n_pages;
 	atomic_t			refcount;
 	struct vb2_vmarea_handler	handler;
 	struct dma_buf			*dbuf;
@@ -76,10 +74,8 @@
 				     enum dma_data_direction dma_dir)
 {
 	struct vb2_vmalloc_buf *buf;
-	unsigned long first, last;
-	int n_pages, offset;
-	struct vm_area_struct *vma;
-	dma_addr_t physp;
+	struct frame_vector *vec;
+	int n_pages, offset, i;
 
 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 	if (!buf)
@@ -88,51 +84,36 @@
 	buf->dma_dir = dma_dir;
 	offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec))
+		goto fail_pfnvec_create;
+	buf->vec = vec;
+	n_pages = frame_vector_count(vec);
+	if (frame_vector_to_pages(vec) < 0) {
+		unsigned long *nums = frame_vector_pfns(vec);
 
-
-	vma = find_vma(current->mm, vaddr);
-	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
-		if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
-			goto fail_pages_array_alloc;
-		buf->vma = vma;
-		buf->vaddr = (__force void *)ioremap_nocache(physp, size);
-		if (!buf->vaddr)
-			goto fail_pages_array_alloc;
+		/*
+		 * We cannot get page pointers for these pfns. Check that the
+		 * memory is physically contiguous and use a direct mapping.
+		 */
+		for (i = 1; i < n_pages; i++)
+			if (nums[i-1] + 1 != nums[i])
+				goto fail_map;
+		buf->vaddr = (__force void *)
+				ioremap_nocache(nums[0] << PAGE_SHIFT, size);
 	} else {
-		first = vaddr >> PAGE_SHIFT;
-		last  = (vaddr + size - 1) >> PAGE_SHIFT;
-		buf->n_pages = last - first + 1;
-		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
-				     GFP_KERNEL);
-		if (!buf->pages)
-			goto fail_pages_array_alloc;
-
-		/* current->mm->mmap_sem is taken by videobuf2 core */
-		n_pages = get_user_pages(current, current->mm,
-					 vaddr & PAGE_MASK, buf->n_pages,
-					 dma_dir == DMA_FROM_DEVICE,
-					 1, /* force */
-					 buf->pages, NULL);
-		if (n_pages != buf->n_pages)
-			goto fail_get_user_pages;
-
-		buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
+		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
 					PAGE_KERNEL);
-		if (!buf->vaddr)
-			goto fail_get_user_pages;
 	}
 
+	if (!buf->vaddr)
+		goto fail_map;
 	buf->vaddr += offset;
 	return buf;
 
-fail_get_user_pages:
-	pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages,
-		 buf->n_pages);
-	while (--n_pages >= 0)
-		put_page(buf->pages[n_pages]);
-	kfree(buf->pages);
-
-fail_pages_array_alloc:
+fail_map:
+	vb2_destroy_framevec(vec);
+fail_pfnvec_create:
 	kfree(buf);
 
 	return NULL;
@@ -143,20 +124,21 @@
 	struct vb2_vmalloc_buf *buf = buf_priv;
 	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
 	unsigned int i;
+	struct page **pages;
+	unsigned int n_pages;
 
-	if (buf->pages) {
+	if (!buf->vec->is_pfns) {
+		n_pages = frame_vector_count(buf->vec);
+		pages = frame_vector_pages(buf->vec);
 		if (vaddr)
-			vm_unmap_ram((void *)vaddr, buf->n_pages);
-		for (i = 0; i < buf->n_pages; ++i) {
-			if (buf->dma_dir == DMA_FROM_DEVICE)
-				set_page_dirty_lock(buf->pages[i]);
-			put_page(buf->pages[i]);
-		}
-		kfree(buf->pages);
+			vm_unmap_ram((void *)vaddr, n_pages);
+		if (buf->dma_dir == DMA_FROM_DEVICE)
+			for (i = 0; i < n_pages; i++)
+				set_page_dirty_lock(pages[i]);
 	} else {
-		vb2_put_vma(buf->vma);
 		iounmap((__force void __iomem *)buf->vaddr);
 	}
+	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }
 
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 4b54128..a726f01 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -138,7 +138,7 @@
 	spin_unlock_irqrestore(&asic->lock, flags);
 }
 
-static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void asic3_irq_demux(struct irq_desc *desc)
 {
 	struct asic3 *asic = irq_desc_get_handler_data(desc);
 	struct irq_data *data = irq_desc_get_irq_data(desc);
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index a76eb6e..b279205 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -205,7 +205,7 @@
 	} while (gpio_get_value(pdata->gpio));
 }
 
-static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void pcap_irq_handler(struct irq_desc *desc)
 {
 	struct pcap_chip *pcap = irq_desc_get_handler_data(desc);
 
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index 9131cdc..6ccaf90 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -98,7 +98,7 @@
 	.irq_unmask	= egpio_unmask,
 };
 
-static void egpio_handler(unsigned int irq, struct irq_desc *desc)
+static void egpio_handler(struct irq_desc *desc)
 {
 	struct egpio_info *ei = irq_desc_get_handler_data(desc);
 	int irqpin;
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 5bb49f0..798e443 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -65,7 +65,7 @@
 	spinlock_t lock;
 };
 
-static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void jz4740_adc_irq_demux(struct irq_desc *desc)
 {
 	struct irq_chip_generic *gc = irq_desc_get_handler_data(desc);
 	uint8_t status;
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index 59502d0..1b7ec08 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -156,7 +156,7 @@
 	return ret;
 }
 
-static void pm8xxx_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void pm8xxx_irq_handler(struct irq_desc *desc)
 {
 	struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
 	struct irq_chip *irq_chip = irq_desc_get_chip(desc);
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 16fc1ad..94bd89c 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -185,7 +185,7 @@
 /*--------------------------------------------------------------------------*/
 
 /* Handle the T7L66XB interrupt mux */
-static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc)
+static void t7l66xb_irq(struct irq_desc *desc)
 {
 	struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc);
 	unsigned int isr;
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 775b9ac..8c84a51 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -522,8 +522,7 @@
 
 /*--------------------------------------------------------------------------*/
 
-static void
-tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
+static void tc6393xb_irq(struct irq_desc *desc)
 {
 	struct tc6393xb *tc6393xb = irq_desc_get_handler_data(desc);
 	unsigned int isr;
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 9a23021..f691d7e 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -282,7 +282,7 @@
  * SIBCLK to talk to the chip.  We leave the clock running until
  * we have finished processing all interrupts from the chip.
  */
-static void ucb1x00_irq(unsigned int __irq, struct irq_desc *desc)
+static void ucb1x00_irq(struct irq_desc *desc)
 {
 	struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
 	unsigned int isr, i;
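
Every mfd handler above changes the same way: the irq argument is dropped
from the chained-handler prototype and whatever the handler needs is taken
from the irq_desc itself. A sketch of a demultiplexer written against the
new prototype; the demo_* names and the stubbed status read are illustrative
assumptions:

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>

struct demo_chip {
	unsigned int irq_base;
};

static unsigned long demo_read_pending(struct demo_chip *chip)
{
	return 0;	/* a real driver reads its interrupt status register */
}

static void demo_irq_demux(struct irq_desc *desc)
{
	/* no "unsigned int irq" parameter any more; use the desc accessors */
	struct demo_chip *chip = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long pending;
	int bit;

	chained_irq_enter(irqchip, desc);
	pending = demo_read_pending(chip);
	for_each_set_bit(bit, &pending, BITS_PER_LONG)
		generic_handle_irq(chip->irq_base + bit);
	chained_irq_exit(irqchip, desc);
}

Such a handler would typically be installed with
irq_set_chained_handler_and_data(), which also attaches the handler data
read back above.
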
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 6f484df..6982f60 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Werror
+ccflags-y := -Werror -Wno-unused-const-variable
 
 cxl-y				+= main.o file.o irq.o fault.o native.o
 cxl-y				+= context.o sysfs.o debugfs.o pci.o trace.o
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 02c8516..a5e9771 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1249,8 +1249,6 @@
 	int slice;
 	int rc;
 
-	pci_dev_get(dev);
-
 	if (cxl_verbose)
 		dump_cxl_config_space(dev);
 
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 25868c2..02006f71 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -592,6 +592,8 @@
 
 	/* conditionally create the add the binary file for error info buffer */
 	if (afu->eb_len) {
+		sysfs_attr_init(&afu->attr_eb.attr);
+
 		afu->attr_eb.attr.name = "afu_err_buff";
 		afu->attr_eb.attr.mode = S_IRUGO;
 		afu->attr_eb.size = afu->eb_len;
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 6dd16a6..94b5208 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -48,6 +48,12 @@
 
 	phb = pci_bus_to_host(dev->bus);
 	afu = (struct cxl_afu *)phb->private_data;
+
+	if (!cxl_adapter_link_ok(afu->adapter)) {
+		dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
+		return false;
+	}
+
 	set_dma_ops(&dev->dev, &dma_direct_ops);
 	set_dma_offset(&dev->dev, PAGE_OFFSET);
 
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index 4b469cf..8504dbe 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -204,6 +204,8 @@
 	if (!dir)
 		return -ENOMEM;
 
+	dev->dbgfs_dir = dir;
+
 	f = debugfs_create_file("meclients", S_IRUSR, dir,
 				dev, &mei_dbgfs_fops_meclients);
 	if (!f) {
@@ -228,7 +230,6 @@
 		dev_err(dev->dev, "allow_fixed_address: registration failed\n");
 		goto err;
 	}
-	dev->dbgfs_dir = dir;
 	return 0;
 err:
 	mei_dbgfs_deregister(dev);
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 2bc0f50..b346638 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -364,6 +364,7 @@
 
 	int ret;
 
+	amt_wd_dev.parent = dev->dev;
 	/* unlock to perserve correct locking order */
 	mutex_unlock(&dev->device_lock);
 	ret = watchdog_register_device(&amt_wd_dev);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0520064..a3eb20b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -134,9 +134,11 @@
 	int err = cmd->error;
 
 	/* Flag re-tuning needed on CRC errors */
-	if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
+	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
+	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
 	    (mrq->data && mrq->data->error == -EILSEQ) ||
-	    (mrq->stop && mrq->stop->error == -EILSEQ))
+	    (mrq->stop && mrq->stop->error == -EILSEQ)))
 		mmc_retune_needed(host);
 
 	if (err && cmd->retries && mmc_host_is_spi(host)) {
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index abd933b..5466f25 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -457,7 +457,7 @@
 					   0, &cd_gpio_invert);
 		if (!ret)
 			dev_info(host->parent, "Got CD GPIO\n");
-		else if (ret != -ENOENT)
+		else if (ret != -ENOENT && ret != -ENOSYS)
 			return ret;
 
 		/*
@@ -481,7 +481,7 @@
 	ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
 	if (!ret)
 		dev_info(host->parent, "Got WP GPIO\n");
-	else if (ret != -ENOENT)
+	else if (ret != -ENOENT && ret != -ENOSYS)
 		return ret;
 
 	if (of_property_read_bool(np, "disable-wp"))
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 1420f29..8cadd74 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -28,6 +28,7 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
 #include <linux/io.h>
 #include <linux/regulator/consumer.h>
 #include <linux/gpio.h>
@@ -454,12 +455,8 @@
 {
 	struct pxamci_host *host = mmc_priv(mmc);
 
-	if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
-		if (host->pdata->gpio_card_ro_invert)
-			return !gpio_get_value(host->pdata->gpio_card_ro);
-		else
-			return gpio_get_value(host->pdata->gpio_card_ro);
-	}
+	if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro))
+		return mmc_gpio_get_ro(mmc);
 	if (host->pdata && host->pdata->get_ro)
 		return !!host->pdata->get_ro(mmc_dev(mmc));
 	/*
@@ -551,6 +548,7 @@
 
 static const struct mmc_host_ops pxamci_ops = {
 	.request		= pxamci_request,
+	.get_cd			= mmc_gpio_get_cd,
 	.get_ro			= pxamci_get_ro,
 	.set_ios		= pxamci_set_ios,
 	.enable_sdio_irq	= pxamci_enable_sdio_irq,
@@ -790,37 +788,31 @@
 		gpio_power = host->pdata->gpio_power;
 	}
 	if (gpio_is_valid(gpio_power)) {
-		ret = gpio_request(gpio_power, "mmc card power");
+		ret = devm_gpio_request(&pdev->dev, gpio_power,
+					"mmc card power");
 		if (ret) {
-			dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
+			dev_err(&pdev->dev, "Failed requesting gpio_power %d\n",
+				gpio_power);
 			goto out;
 		}
 		gpio_direction_output(gpio_power,
 				      host->pdata->gpio_power_invert);
 	}
-	if (gpio_is_valid(gpio_ro)) {
-		ret = gpio_request(gpio_ro, "mmc card read only");
-		if (ret) {
-			dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
-			goto err_gpio_ro;
-		}
-		gpio_direction_input(gpio_ro);
+	if (gpio_is_valid(gpio_ro))
+		ret = mmc_gpio_request_ro(mmc, gpio_ro);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
+		goto out;
+	} else {
+		mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+			MMC_CAP2_RO_ACTIVE_HIGH : 0;
 	}
-	if (gpio_is_valid(gpio_cd)) {
-		ret = gpio_request(gpio_cd, "mmc card detect");
-		if (ret) {
-			dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
-			goto err_gpio_cd;
-		}
-		gpio_direction_input(gpio_cd);
 
-		ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
-				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-				  "mmc card detect", mmc);
-		if (ret) {
-			dev_err(&pdev->dev, "failed to request card detect IRQ\n");
-			goto err_request_irq;
-		}
+	if (gpio_is_valid(gpio_cd))
+		ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
+		goto out;
 	}
 
 	if (host->pdata && host->pdata->init)
@@ -835,13 +827,7 @@
 
 	return 0;
 
-err_request_irq:
-	gpio_free(gpio_cd);
-err_gpio_cd:
-	gpio_free(gpio_ro);
-err_gpio_ro:
-	gpio_free(gpio_power);
- out:
+out:
 	if (host) {
 		if (host->dma_chan_rx)
 			dma_release_channel(host->dma_chan_rx);
@@ -873,14 +859,6 @@
 			gpio_ro = host->pdata->gpio_card_ro;
 			gpio_power = host->pdata->gpio_power;
 		}
-		if (gpio_is_valid(gpio_cd)) {
-			free_irq(gpio_to_irq(gpio_cd), mmc);
-			gpio_free(gpio_cd);
-		}
-		if (gpio_is_valid(gpio_ro))
-			gpio_free(gpio_ro);
-		if (gpio_is_valid(gpio_power))
-			gpio_free(gpio_power);
 		if (host->vcc)
 			regulator_put(host->vcc);
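
The pxamci changes are the standard conversion from hand-rolled
gpio_request()/request_irq() card-detect and write-protect handling to the
slot-gpio helpers, with mmc_gpio_get_cd()/mmc_gpio_get_ro() plugged into the
host ops. A condensed sketch of the probe side; demo_* and the parameters
are illustrative assumptions:

#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int demo_request_slot_gpios(struct mmc_host *mmc, int gpio_cd,
				   int gpio_ro, bool ro_invert)
{
	int ret;

	if (gpio_is_valid(gpio_ro)) {
		/* mmc_gpio_get_ro() can then back the host's ->get_ro */
		ret = mmc_gpio_request_ro(mmc, gpio_ro);
		if (ret)
			return ret;
		if (ro_invert)
			mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
	}

	if (gpio_is_valid(gpio_cd)) {
		/*
		 * debounce of 0; the core requests the card-detect IRQ
		 * itself, so no private request_irq() is needed
		 */
		ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
		if (ret)
			return ret;
	}
	return 0;
}
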
 
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index a7b7a67..b981b85 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -210,6 +210,16 @@
 #define SDXC_IDMAC_DES0_CES	BIT(30) /* card error summary */
 #define SDXC_IDMAC_DES0_OWN	BIT(31) /* 1-idma owns it, 0-host owns it */
 
+#define SDXC_CLK_400K		0
+#define SDXC_CLK_25M		1
+#define SDXC_CLK_50M		2
+#define SDXC_CLK_50M_DDR	3
+
+struct sunxi_mmc_clk_delay {
+	u32 output;
+	u32 sample;
+};
+
 struct sunxi_idma_des {
 	u32	config;
 	u32	buf_size;
@@ -229,6 +239,7 @@
 	struct clk	*clk_mmc;
 	struct clk	*clk_sample;
 	struct clk	*clk_output;
+	const struct sunxi_mmc_clk_delay *clk_delays;
 
 	/* irq */
 	spinlock_t	lock;
@@ -654,25 +665,19 @@
 
 	/* determine delays */
 	if (rate <= 400000) {
-		oclk_dly = 180;
-		sclk_dly = 42;
+		oclk_dly = host->clk_delays[SDXC_CLK_400K].output;
+		sclk_dly = host->clk_delays[SDXC_CLK_400K].sample;
 	} else if (rate <= 25000000) {
-		oclk_dly = 180;
-		sclk_dly = 75;
+		oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
+		sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
 	} else if (rate <= 50000000) {
 		if (ios->timing == MMC_TIMING_UHS_DDR50) {
-			oclk_dly = 60;
-			sclk_dly = 120;
+			oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
+			sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
 		} else {
-			oclk_dly = 90;
-			sclk_dly = 150;
+			oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
+			sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
 		}
-	} else if (rate <= 100000000) {
-		oclk_dly = 6;
-		sclk_dly = 24;
-	} else if (rate <= 200000000) {
-		oclk_dly = 3;
-		sclk_dly = 12;
 	} else {
 		return -EINVAL;
 	}
@@ -871,6 +876,7 @@
 static const struct of_device_id sunxi_mmc_of_match[] = {
 	{ .compatible = "allwinner,sun4i-a10-mmc", },
 	{ .compatible = "allwinner,sun5i-a13-mmc", },
+	{ .compatible = "allwinner,sun9i-a80-mmc", },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
@@ -884,6 +890,20 @@
 	.hw_reset	 = sunxi_mmc_hw_reset,
 };
 
+static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
+	[SDXC_CLK_400K]		= { .output = 180, .sample = 180 },
+	[SDXC_CLK_25M]		= { .output = 180, .sample =  75 },
+	[SDXC_CLK_50M]		= { .output =  90, .sample = 120 },
+	[SDXC_CLK_50M_DDR]	= { .output =  60, .sample = 120 },
+};
+
+static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
+	[SDXC_CLK_400K]		= { .output = 180, .sample = 180 },
+	[SDXC_CLK_25M]		= { .output = 180, .sample =  75 },
+	[SDXC_CLK_50M]		= { .output = 150, .sample = 120 },
+	[SDXC_CLK_50M_DDR]	= { .output =  90, .sample = 120 },
+};
+
 static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
 				      struct platform_device *pdev)
 {
@@ -895,6 +915,11 @@
 	else
 		host->idma_des_size_bits = 16;
 
+	if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc"))
+		host->clk_delays = sun9i_mmc_clk_delays;
+	else
+		host->clk_delays = sunxi_mmc_clk_delays;
+
 	ret = mmc_regulator_get_supply(host->mmc);
 	if (ret) {
 		if (ret != -EPROBE_DEFER)
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 5bbd1f0..1fc23e4 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -926,6 +926,11 @@
 		goto bad;
 	}
 
+	if (data_size > ubi->leb_size) {
+		ubi_err(ubi, "bad data_size");
+		goto bad;
+	}
+
 	if (vol_type == UBI_VID_STATIC) {
 		/*
 		 * Although from high-level point of view static volumes may
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 80bdd5b..d85c197 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -649,6 +649,7 @@
 		if (ubi->corr_peb_count)
 			ubi_err(ubi, "%d PEBs are corrupted and not used",
 				ubi->corr_peb_count);
+		return -ENOSPC;
 	}
 	ubi->rsvd_pebs += reserved_pebs;
 	ubi->avail_pebs -= reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 275d9fb..eb4489f9 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1601,6 +1601,7 @@
 		if (ubi->corr_peb_count)
 			ubi_err(ubi, "%d PEBs are corrupted and not used",
 				ubi->corr_peb_count);
+		err = -ENOSPC;
 		goto out_free;
 	}
 	ubi->avail_pebs -= reserved_pebs;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d18eb60..b9ebd0d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -299,6 +299,7 @@
 config NET_VRF
 	tristate "Virtual Routing and Forwarding (Lite)"
 	depends on IP_MULTIPLE_TABLES && IPV6_MULTIPLE_TABLES
+	depends on NET_L3_MASTER_DEV
 	---help---
 	  This option enables the support for mapping interfaces into VRF's. The
 	  support enables VRF devices.
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index 705e6ce..d78f301 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -1,6 +1,6 @@
 /*
  * Linux ARCnet driver - "raw mode" packet encapsulation (no soft headers)
- * 
+ *
  * Written 1994-1999 by Avery Pennarun.
  * Derived from skeleton.c by Donald Becker.
  *
@@ -24,6 +24,8 @@
  * **********************
  */
 
+#define pr_fmt(fmt) "arcnet:" KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/init.h>
@@ -31,20 +33,124 @@
 #include <net/arp.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
-#include <linux/arcdevice.h>
+#include "arcdevice.h"
 
-#define VERSION "arcnet: raw mode (`r') encapsulation support loaded.\n"
-
-
+/* packet receiver */
 static void rx(struct net_device *dev, int bufnum,
-	       struct archdr *pkthdr, int length);
-static int build_header(struct sk_buff *skb, struct net_device *dev,
-			unsigned short type, uint8_t daddr);
-static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
-		      int bufnum);
-
-static struct ArcProto rawmode_proto =
+	       struct archdr *pkthdr, int length)
 {
+	struct arcnet_local *lp = netdev_priv(dev);
+	struct sk_buff *skb;
+	struct archdr *pkt = pkthdr;
+	int ofs;
+
+	arc_printk(D_DURING, dev, "it's a raw packet (length=%d)\n", length);
+
+	if (length > MTU)
+		ofs = 512 - length;
+	else
+		ofs = 256 - length;
+
+	skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
+	if (!skb) {
+		dev->stats.rx_dropped++;
+		return;
+	}
+	skb_put(skb, length + ARC_HDR_SIZE);
+	skb->dev = dev;
+
+	pkt = (struct archdr *)skb->data;
+
+	skb_reset_mac_header(skb);
+	skb_pull(skb, ARC_HDR_SIZE);
+
+	/* up to sizeof(pkt->soft) has already been copied from the card */
+	memcpy(pkt, pkthdr, sizeof(struct archdr));
+	if (length > sizeof(pkt->soft))
+		lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
+				      pkt->soft.raw + sizeof(pkt->soft),
+				      length - sizeof(pkt->soft));
+
+	if (BUGLVL(D_SKB))
+		arcnet_dump_skb(dev, skb, "rx");
+
+	skb->protocol = cpu_to_be16(ETH_P_ARCNET);
+	netif_rx(skb);
+}
+
+/* Create the ARCnet hard/soft headers for raw mode.
+ * There aren't any soft headers in raw mode - not even the protocol id.
+ */
+static int build_header(struct sk_buff *skb, struct net_device *dev,
+			unsigned short type, uint8_t daddr)
+{
+	int hdr_size = ARC_HDR_SIZE;
+	struct archdr *pkt = (struct archdr *)skb_push(skb, hdr_size);
+
+	/* Set the source hardware address.
+	 *
+	 * This is pretty pointless for most purposes, but it can help in
+	 * debugging.  ARCnet does not allow us to change the source address
+	 * in the actual packet sent.
+	 */
+	pkt->hard.source = *dev->dev_addr;
+
+	/* see linux/net/ethernet/eth.c to see where I got the following */
+
+	if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
+		/* FIXME: fill in the last byte of the dest ipaddr here
+		 * to better comply with RFC1051 in "noarp" mode.
+		 */
+		pkt->hard.dest = 0;
+		return hdr_size;
+	}
+	/* otherwise, just fill it in and go! */
+	pkt->hard.dest = daddr;
+
+	return hdr_size;	/* success */
+}
+
+static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
+		      int bufnum)
+{
+	struct arcnet_local *lp = netdev_priv(dev);
+	struct arc_hardware *hard = &pkt->hard;
+	int ofs;
+
+	arc_printk(D_DURING, dev, "prepare_tx: txbufs=%d/%d/%d\n",
+		   lp->next_tx, lp->cur_tx, bufnum);
+
+	/* hard header is not included in packet length */
+	length -= ARC_HDR_SIZE;
+
+	if (length > XMTU) {
+		/* should never happen! other people already check for this. */
+		arc_printk(D_NORMAL, dev, "Bug!  prepare_tx with size %d (> %d)\n",
+			   length, XMTU);
+		length = XMTU;
+	}
+	if (length >= MinTU) {
+		hard->offset[0] = 0;
+		hard->offset[1] = ofs = 512 - length;
+	} else if (length > MTU) {
+		hard->offset[0] = 0;
+		hard->offset[1] = ofs = 512 - length - 3;
+	} else {
+		hard->offset[0] = ofs = 256 - length;
+	}
+
+	arc_printk(D_DURING, dev, "prepare_tx: length=%d ofs=%d\n",
+		   length, ofs);
+
+	lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
+	lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft, length);
+
+	lp->lastload_dest = hard->dest;
+
+	return 1;		/* done */
+}
+
+static struct ArcProto rawmode_proto = {
 	.suffix		= 'r',
 	.mtu		= XMTU,
 	.rx		= rx,
@@ -54,12 +160,11 @@
 	.ack_tx         = NULL
 };
 
-
 static int __init arcnet_raw_init(void)
 {
 	int count;
 
-	printk(VERSION);
+	pr_info("raw mode (`r') encapsulation support loaded\n");
 
 	for (count = 0; count < 256; count++)
 		if (arc_proto_map[count] == arc_proto_default)
@@ -82,122 +187,3 @@
 module_exit(arcnet_raw_exit);
 
 MODULE_LICENSE("GPL");
-
-
-/* packet receiver */
-static void rx(struct net_device *dev, int bufnum,
-	       struct archdr *pkthdr, int length)
-{
-	struct arcnet_local *lp = netdev_priv(dev);
-	struct sk_buff *skb;
-	struct archdr *pkt = pkthdr;
-	int ofs;
-
-	BUGMSG(D_DURING, "it's a raw packet (length=%d)\n", length);
-
-	if (length > MTU)
-		ofs = 512 - length;
-	else
-		ofs = 256 - length;
-
-	skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
-	if (skb == NULL) {
-		BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
-		dev->stats.rx_dropped++;
-		return;
-	}
-	skb_put(skb, length + ARC_HDR_SIZE);
-	skb->dev = dev;
-
-	pkt = (struct archdr *) skb->data;
-
-	skb_reset_mac_header(skb);
-	skb_pull(skb, ARC_HDR_SIZE);
-
-	/* up to sizeof(pkt->soft) has already been copied from the card */
-	memcpy(pkt, pkthdr, sizeof(struct archdr));
-	if (length > sizeof(pkt->soft))
-		lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
-				      pkt->soft.raw + sizeof(pkt->soft),
-				      length - sizeof(pkt->soft));
-
-	BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
-
-	skb->protocol = cpu_to_be16(ETH_P_ARCNET);
-	netif_rx(skb);
-}
-
-
-/*
- * Create the ARCnet hard/soft headers for raw mode.
- * There aren't any soft headers in raw mode - not even the protocol id.
- */
-static int build_header(struct sk_buff *skb, struct net_device *dev,
-			unsigned short type, uint8_t daddr)
-{
-	int hdr_size = ARC_HDR_SIZE;
-	struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
-
-	/*
-	 * Set the source hardware address.
-	 *
-	 * This is pretty pointless for most purposes, but it can help in
-	 * debugging.  ARCnet does not allow us to change the source address in
-	 * the actual packet sent)
-	 */
-	pkt->hard.source = *dev->dev_addr;
-
-	/* see linux/net/ethernet/eth.c to see where I got the following */
-
-	if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
-		/* 
-		 * FIXME: fill in the last byte of the dest ipaddr here to better
-		 * comply with RFC1051 in "noarp" mode.
-		 */
-		pkt->hard.dest = 0;
-		return hdr_size;
-	}
-	/* otherwise, just fill it in and go! */
-	pkt->hard.dest = daddr;
-
-	return hdr_size;	/* success */
-}
-
-
-static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
-		      int bufnum)
-{
-	struct arcnet_local *lp = netdev_priv(dev);
-	struct arc_hardware *hard = &pkt->hard;
-	int ofs;
-
-	BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n",
-	       lp->next_tx, lp->cur_tx, bufnum);
-
-	length -= ARC_HDR_SIZE;	/* hard header is not included in packet length */
-
-	if (length > XMTU) {
-		/* should never happen! other people already check for this. */
-		BUGMSG(D_NORMAL, "Bug!  prepare_tx with size %d (> %d)\n",
-		       length, XMTU);
-		length = XMTU;
-	}
-	if (length >= MinTU) {
-		hard->offset[0] = 0;
-		hard->offset[1] = ofs = 512 - length;
-	} else if (length > MTU) {
-		hard->offset[0] = 0;
-		hard->offset[1] = ofs = 512 - length - 3;
-	} else
-		hard->offset[0] = ofs = 256 - length;
-
-	BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n",
-	       length,ofs);
-
-	lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
-	lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft, length);
-
-	lp->lastload_dest = hard->dest;
-
-	return 1;		/* done */
-}
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index b8b4c7b..a07e249 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -1,6 +1,6 @@
 /*
  * Linux ARCnet driver - "RIM I" (entirely mem-mapped) cards
- * 
+ *
  * Written 1994-1999 by Avery Pennarun.
  * Written 1999-2000 by Martin Mares <mj@ucw.cz>.
  * Derived from skeleton.c by Donald Becker.
@@ -24,6 +24,9 @@
  *
  * **********************
  */
+
+#define pr_fmt(fmt) "arcnet:" KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -33,12 +36,10 @@
 #include <linux/bootmem.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <asm/io.h>
-#include <linux/arcdevice.h>
+#include <linux/io.h>
 
-
-#define VERSION "arcnet: RIM I (entirely mem-mapped) support\n"
-
+#include "arcdevice.h"
+#include "com9026.h"
 
 /* Internal function declarations */
 
@@ -50,66 +51,46 @@
 static int arcrimi_reset(struct net_device *dev, int really_reset);
 static void arcrimi_copy_to_card(struct net_device *dev, int bufnum, int offset,
 				 void *buf, int count);
-static void arcrimi_copy_from_card(struct net_device *dev, int bufnum, int offset,
-				   void *buf, int count);
+static void arcrimi_copy_from_card(struct net_device *dev, int bufnum,
+				   int offset, void *buf, int count);
 
 /* Handy defines for ARCnet specific stuff */
 
 /* Amount of I/O memory used by the card */
-#define BUFFER_SIZE (512)
-#define MIRROR_SIZE (BUFFER_SIZE*4)
+#define BUFFER_SIZE	(512)
+#define MIRROR_SIZE	(BUFFER_SIZE * 4)
 
-/* COM 9026 controller chip --> ARCnet register addresses */
-#define _INTMASK (ioaddr+0)	/* writable */
-#define _STATUS  (ioaddr+0)	/* readable */
-#define _COMMAND (ioaddr+1)	/* writable, returns random vals on read (?) */
-#define _RESET  (ioaddr+8)	/* software reset (on read) */
-#define _MEMDATA  (ioaddr+12)	/* Data port for IO-mapped memory */
-#define _ADDR_HI  (ioaddr+15)	/* Control registers for said */
-#define _ADDR_LO  (ioaddr+14)
-#define _CONFIG  (ioaddr+2)	/* Configuration register */
-
-#undef ASTATUS
-#undef ACOMMAND
-#undef AINTMASK
-
-#define ASTATUS()	readb(_STATUS)
-#define ACOMMAND(cmd)	writeb((cmd),_COMMAND)
-#define AINTMASK(msk)	writeb((msk),_INTMASK)
-#define SETCONF()	writeb(lp->config,_CONFIG)
-
-
-/*
- * We cannot probe for a RIM I card; one reason is I don't know how to reset
+/* We cannot probe for a RIM I card; one reason is I don't know how to reset
  * them.  In fact, we can't even get their node ID automatically.  So, we
  * need to be passed a specific shmem address, IRQ, and node ID.
  */
 static int __init arcrimi_probe(struct net_device *dev)
 {
-	BUGLVL(D_NORMAL) printk(VERSION);
-	BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n");
-
-	BUGLVL(D_NORMAL) printk("Given: node %02Xh, shmem %lXh, irq %d\n",
-	       dev->dev_addr[0], dev->mem_start, dev->irq);
+	if (BUGLVL(D_NORMAL)) {
+		pr_info("%s\n", "RIM I (entirely mem-mapped) support");
+		pr_info("E-mail me if you actually test the RIM I driver, please!\n");
+		pr_info("Given: node %02Xh, shmem %lXh, irq %d\n",
+			dev->dev_addr[0], dev->mem_start, dev->irq);
+	}
 
 	if (dev->mem_start <= 0 || dev->irq <= 0) {
-		BUGLVL(D_NORMAL) printk("No autoprobe for RIM I; you "
-		       "must specify the shmem and irq!\n");
+		if (BUGLVL(D_NORMAL))
+			pr_err("No autoprobe for RIM I; you must specify the shmem and irq!\n");
 		return -ENODEV;
 	}
 	if (dev->dev_addr[0] == 0) {
-		BUGLVL(D_NORMAL) printk("You need to specify your card's station "
-		       "ID!\n");
+		if (BUGLVL(D_NORMAL))
+			pr_err("You need to specify your card's station ID!\n");
 		return -ENODEV;
 	}
-	/*
-	 * Grab the memory region at mem_start for MIRROR_SIZE bytes.
+	/* Grab the memory region at mem_start for MIRROR_SIZE bytes.
 	 * Later in arcrimi_found() the real size will be determined
 	 * and this reserve will be released and the correct size
 	 * will be taken.
 	 */
 	if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) {
-		BUGLVL(D_NORMAL) printk("Card memory already allocated\n");
+		if (BUGLVL(D_NORMAL))
+			pr_notice("Card memory already allocated\n");
 		return -ENODEV;
 	}
 	return arcrimi_found(dev);
@@ -125,7 +106,7 @@
 
 	p = ioremap(addr, size);
 	if (p) {
-		if (readb(p) == TESTvalue)
+		if (arcnet_readb(p, COM9026_REG_R_STATUS) == TESTvalue)
 			res = 1;
 		else
 			res = 0;
@@ -136,9 +117,8 @@
 	return res;
 }
 
-/*
- * Set up the struct net_device associated with this card.  Called after
- * probing succeeds.
+/* Set up the struct net_device associated with this card.
+ * Called after probing succeeds.
  */
 static int __init arcrimi_found(struct net_device *dev)
 {
@@ -151,7 +131,7 @@
 	p = ioremap(dev->mem_start, MIRROR_SIZE);
 	if (!p) {
 		release_mem_region(dev->mem_start, MIRROR_SIZE);
-		BUGMSG(D_NORMAL, "Can't ioremap\n");
+		arc_printk(D_NORMAL, dev, "Can't ioremap\n");
 		return -ENODEV;
 	}
 
@@ -159,13 +139,14 @@
 	if (request_irq(dev->irq, arcnet_interrupt, 0, "arcnet (RIM I)", dev)) {
 		iounmap(p);
 		release_mem_region(dev->mem_start, MIRROR_SIZE);
-		BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
+		arc_printk(D_NORMAL, dev, "Can't get IRQ %d!\n", dev->irq);
 		return -ENODEV;
 	}
 
 	shmem = dev->mem_start;
-	writeb(TESTvalue, p);
-	writeb(dev->dev_addr[0], p + 1);	/* actually the node ID */
+	arcnet_writeb(TESTvalue, p, COM9026_REG_W_INTMASK);
+	arcnet_writeb(dev->dev_addr[0], p, COM9026_REG_W_COMMAND);
+					/* actually the station/node ID */
 
 	/* find the real shared memory start/end points, including mirrors */
 
@@ -174,7 +155,7 @@
 	 * 2k (or there are no mirrors at all) but on some, it's 4k.
 	 */
 	mirror_size = MIRROR_SIZE;
-	if (readb(p) == TESTvalue &&
+	if (arcnet_readb(p, COM9026_REG_R_STATUS) == TESTvalue &&
 	    check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 &&
 	    check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1)
 		mirror_size = 2 * MIRROR_SIZE;
@@ -204,8 +185,7 @@
 	lp->hw.copy_to_card = arcrimi_copy_to_card;
 	lp->hw.copy_from_card = arcrimi_copy_from_card;
 
-	/*
-	 * re-reserve the memory region - arcrimi_probe() alloced this reqion
+	/* re-reserve the memory region - arcrimi_probe() allocated this region
 	 * but didn't know the real size.  Free that region and then re-get
 	 * with the correct size.  There is a VERY slim chance this could
 	 * fail.
@@ -215,24 +195,25 @@
 	if (!request_mem_region(dev->mem_start,
 				dev->mem_end - dev->mem_start + 1,
 				"arcnet (90xx)")) {
-		BUGMSG(D_NORMAL, "Card memory already allocated\n");
+		arc_printk(D_NORMAL, dev, "Card memory already allocated\n");
 		goto err_free_irq;
 	}
 
-	lp->mem_start = ioremap(dev->mem_start, dev->mem_end - dev->mem_start + 1);
+	lp->mem_start = ioremap(dev->mem_start,
+				dev->mem_end - dev->mem_start + 1);
 	if (!lp->mem_start) {
-		BUGMSG(D_NORMAL, "Can't remap device memory!\n");
+		arc_printk(D_NORMAL, dev, "Can't remap device memory!\n");
 		goto err_release_mem;
 	}
 
 	/* get and check the station ID from offset 1 in shmem */
-	dev->dev_addr[0] = readb(lp->mem_start + 1);
+	dev->dev_addr[0] = arcnet_readb(lp->mem_start, COM9026_REG_R_STATION);
 
-	BUGMSG(D_NORMAL, "ARCnet RIM I: station %02Xh found at IRQ %d, "
-	       "ShMem %lXh (%ld*%d bytes).\n",
-	       dev->dev_addr[0],
-	       dev->irq, dev->mem_start,
-	 (dev->mem_end - dev->mem_start + 1) / mirror_size, mirror_size);
+	arc_printk(D_NORMAL, dev, "ARCnet RIM I: station %02Xh found at IRQ %d, ShMem %lXh (%ld*%d bytes)\n",
+		   dev->dev_addr[0],
+		   dev->irq, dev->mem_start,
+		   (dev->mem_end - dev->mem_start + 1) / mirror_size,
+		   mirror_size);
 
 	err = register_netdev(dev);
 	if (err)
@@ -249,9 +230,7 @@
 	return -EIO;
 }
 
-
-/*
- * Do a hardware reset on the card, and set up necessary registers.
+/* Do a hardware reset on the card, and set up necessary registers.
  *
  * This should be called as little as possible, because it disrupts the
  * token on the network (causes a RECON) and requires a significant delay.
@@ -263,17 +242,19 @@
 	struct arcnet_local *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->mem_start + 0x800;
 
-	BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", dev->name, ASTATUS());
+	arc_printk(D_INIT, dev, "Resetting %s (status=%02Xh)\n",
+		   dev->name, arcnet_readb(ioaddr, COM9026_REG_R_STATUS));
 
 	if (really_reset) {
-		writeb(TESTvalue, ioaddr - 0x800);	/* fake reset */
+		arcnet_writeb(TESTvalue, ioaddr, -0x800);	/* fake reset */
 		return 0;
 	}
-	ACOMMAND(CFLAGScmd | RESETclear);	/* clear flags & end reset */
-	ACOMMAND(CFLAGScmd | CONFIGclear);
+	/* clear flags & end reset */
+	arcnet_writeb(CFLAGScmd | RESETclear, ioaddr, COM9026_REG_W_COMMAND);
+	arcnet_writeb(CFLAGScmd | CONFIGclear, ioaddr, COM9026_REG_W_COMMAND);
 
 	/* enable extended (512-byte) packets */
-	ACOMMAND(CONFIGcmd | EXTconf);
+	arcnet_writeb(CONFIGcmd | EXTconf, ioaddr, COM9026_REG_W_COMMAND);
 
 	/* done!  return success. */
 	return 0;
@@ -284,7 +265,7 @@
 	struct arcnet_local *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->mem_start + 0x800;
 
-	AINTMASK(mask);
+	arcnet_writeb(mask, ioaddr, COM9026_REG_W_INTMASK);
 }
 
 static int arcrimi_status(struct net_device *dev)
@@ -292,7 +273,7 @@
 	struct arcnet_local *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->mem_start + 0x800;
 
-	return ASTATUS();
+	return arcnet_readb(ioaddr, COM9026_REG_R_STATUS);
 }
 
 static void arcrimi_command(struct net_device *dev, int cmd)
@@ -300,7 +281,7 @@
 	struct arcnet_local *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->mem_start + 0x800;
 
-	ACOMMAND(cmd);
+	arcnet_writeb(cmd, ioaddr, COM9026_REG_W_COMMAND);
 }
 
 static void arcrimi_copy_to_card(struct net_device *dev, int bufnum, int offset,
@@ -308,16 +289,17 @@
 {
 	struct arcnet_local *lp = netdev_priv(dev);
 	void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset;
-	TIME("memcpy_toio", count, memcpy_toio(memaddr, buf, count));
+
+	TIME(dev, "memcpy_toio", count, memcpy_toio(memaddr, buf, count));
 }
 
-
-static void arcrimi_copy_from_card(struct net_device *dev, int bufnum, int offset,
-				   void *buf, int count)
+static void arcrimi_copy_from_card(struct net_device *dev, int bufnum,
+				   int offset, void *buf, int count)
 {
 	struct arcnet_local *lp = netdev_priv(dev);
 	void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset;
-	TIME("memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
+
+	TIME(dev, "memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
 }
 
 static int node;
@@ -374,12 +356,13 @@
 static int __init arcrimi_setup(char *s)
 {
 	int ints[8];
+
 	s = get_options(s, 8, ints);
 	if (!ints[0])
 		return 1;
 	switch (ints[0]) {
 	default:		/* ERROR */
-		printk("arcrimi: Too many arguments.\n");
+		pr_err("Too many arguments\n");
 	case 3:		/* Node ID */
 		node = ints[3];
 	case 2:		/* IRQ */
diff --git a/include/linux/arcdevice.h b/drivers/net/arcnet/arcdevice.h
similarity index 78%
rename from include/linux/arcdevice.h
rename to drivers/net/arcnet/arcdevice.h
index df03562..d7fdea1 100644
--- a/include/linux/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -34,7 +34,6 @@
  */
 #define RECON_THRESHOLD 30
 
-
 /*
  * Define this to the minimum "timeout" value.  If a transmit takes longer
  * than TX_TIMEOUT jiffies, Linux will abort the TX and retry.  On a large
@@ -44,14 +43,12 @@
  */
 #define TX_TIMEOUT (HZ * 200 / 1000)
 
-
 /* Display warnings about the driver being an ALPHA version. */
 #undef ALPHA_WARNING
 
-
 /*
  * Debugging bitflags: each option can be enabled individually.
- * 
+ *
  * Note: only debug flags included in the ARCNET_DEBUG_MAX define will
  *   actually be available.  GCC will (at least, GCC 2.7.0 will) notice
  *   lines using a BUGLVL not in ARCNET_DEBUG_MAX and automatically optimize
@@ -77,35 +74,47 @@
 #endif
 
 #ifndef ARCNET_DEBUG
-#define ARCNET_DEBUG (D_NORMAL|D_EXTRA)
+#define ARCNET_DEBUG (D_NORMAL | D_EXTRA)
 #endif
 extern int arcnet_debug;
 
+#define BUGLVL(x)	((x) & ARCNET_DEBUG_MAX & arcnet_debug)
+
 /* macros to simplify debug checking */
-#define BUGLVL(x) if ((ARCNET_DEBUG_MAX)&arcnet_debug&(x))
-#define BUGMSG2(x,msg,args...) do { BUGLVL(x) printk(msg, ## args); } while (0)
-#define BUGMSG(x,msg,args...) \
-	BUGMSG2(x, "%s%6s: " msg, \
-            x==D_NORMAL	? KERN_WARNING \
-            		: x < D_DURING ? KERN_INFO : KERN_DEBUG, \
-	    dev->name , ## args)
+#define arc_printk(x, dev, fmt, ...)					\
+do {									\
+	if (BUGLVL(x)) {						\
+		if ((x) == D_NORMAL)					\
+			netdev_warn(dev, fmt, ##__VA_ARGS__);		\
+		else if ((x) < D_DURING)				\
+			netdev_info(dev, fmt, ##__VA_ARGS__);		\
+		else							\
+			netdev_dbg(dev, fmt, ##__VA_ARGS__);		\
+	}								\
+} while (0)
+
+#define arc_cont(x, fmt, ...)						\
+do {									\
+	if (BUGLVL(x))							\
+		pr_cont(fmt, ##__VA_ARGS__);				\
+} while (0)
 
 /* see how long a function call takes to run, expressed in CPU cycles */
-#define TIME(name, bytes, call) BUGLVL(D_TIMING) { \
-	    unsigned long _x, _y; \
-	    _x = get_cycles(); \
-	    call; \
-	    _y = get_cycles(); \
-	    BUGMSG(D_TIMING, \
-	       "%s: %d bytes in %lu cycles == " \
-	       "%lu Kbytes/100Mcycle\n",\
-		   name, bytes, _y - _x, \
-		   100000000 / 1024 * bytes / (_y - _x + 1));\
-	} \
-	else { \
-		    call;\
-	}
-
+#define TIME(dev, name, bytes, call)					\
+do {									\
+	if (BUGLVL(D_TIMING)) {						\
+		unsigned long _x, _y;					\
+		_x = get_cycles();					\
+		call;							\
+		_y = get_cycles();					\
+		arc_printk(D_TIMING, dev,				\
+			   "%s: %d bytes in %lu cycles == %lu Kbytes/100Mcycle\n", \
+			   name, bytes, _y - _x,			\
+			   100000000 / 1024 * bytes / (_y - _x + 1));	\
+	} else {							\
+		call;							\
+	}								\
+} while (0)
 
 /*
  * Time needed to reset the card - in ms (milliseconds).  This works on my
@@ -155,6 +164,7 @@
 #define CONFIGcmd       0x05	/* define configuration */
 #define CFLAGScmd       0x06	/* clear flags */
 #define TESTcmd         0x07	/* load test flags */
+#define STARTIOcmd      0x18	/* start internal operation */
 
 /* flags for "clear flags" command */
 #define RESETclear      0x08	/* power-on-reset */
@@ -182,29 +192,27 @@
 #define ARC_CAN_10MBIT  2   /* card uses COM20022, supporting 10MBit,
 				 but default is 2.5MBit. */
 
-
 /* information needed to define an encapsulation driver */
 struct ArcProto {
 	char suffix;		/* a for RFC1201, e for ether-encap, etc. */
 	int mtu;		/* largest possible packet */
 	int is_ip;              /* This is a ip plugin - not a raw thing */
 
-	void (*rx) (struct net_device * dev, int bufnum,
-		    struct archdr * pkthdr, int length);
-	int (*build_header) (struct sk_buff * skb, struct net_device *dev,
-			     unsigned short ethproto, uint8_t daddr);
+	void (*rx)(struct net_device *dev, int bufnum,
+		   struct archdr *pkthdr, int length);
+	int (*build_header)(struct sk_buff *skb, struct net_device *dev,
+			    unsigned short ethproto, uint8_t daddr);
 
 	/* these functions return '1' if the skb can now be freed */
-	int (*prepare_tx) (struct net_device * dev, struct archdr * pkt, int length,
-			   int bufnum);
-	int (*continue_tx) (struct net_device * dev, int bufnum);
-	int (*ack_tx) (struct net_device * dev, int acked);
+	int (*prepare_tx)(struct net_device *dev, struct archdr *pkt,
+			  int length, int bufnum);
+	int (*continue_tx)(struct net_device *dev, int bufnum);
+	int (*ack_tx)(struct net_device *dev, int acked);
 };
 
 extern struct ArcProto *arc_proto_map[256], *arc_proto_default,
 	*arc_bcast_proto, *arc_raw_proto;
 
-
 /*
  * "Incoming" is information needed for each address that could be sending
  * to us.  Mostly for partially-received split packets.
@@ -216,7 +224,6 @@
 		numpackets;	/* number of packets in split     */
 };
 
-
 /* only needed for RFC1201 */
 struct Outgoing {
 	struct ArcProto *proto;	/* protocol driver that owns this:
@@ -230,7 +237,6 @@
 		numsegs;	/* number of segments */
 };
 
-
 struct arcnet_local {
 	uint8_t config,		/* current value of CONFIG register */
 		timeout,	/* Extended timeout for COM20020 */
@@ -251,7 +257,6 @@
 	char *card_name;	/* card ident string */
 	int card_flags;		/* special card features */
 
-
 	/* On preemtive and SMB a lock is needed */
 	spinlock_t lock;
 
@@ -263,13 +268,13 @@
 	 * situations in which we (for example) want to pre-load a transmit
 	 * buffer, or start receiving while we copy a received packet to
 	 * memory.
-	 * 
+	 *
 	 * The rules: only the interrupt handler is allowed to _add_ buffers to
 	 * the queue; thus, this doesn't require a lock.  Both the interrupt
 	 * handler and the transmit function will want to _remove_ buffers, so
 	 * we need to handle the situation where they try to do it at the same
 	 * time.
-	 * 
+	 *
 	 * If next_buf == first_free_buf, the queue is empty.  Since there are
 	 * only four possible buffers, the queue should never be full.
 	 */
@@ -298,34 +303,29 @@
 	/* hardware-specific functions */
 	struct {
 		struct module *owner;
-		void (*command) (struct net_device * dev, int cmd);
-		int (*status) (struct net_device * dev);
-		void (*intmask) (struct net_device * dev, int mask);
-		int (*reset) (struct net_device * dev, int really_reset);
-		void (*open) (struct net_device * dev);
-		void (*close) (struct net_device * dev);
+		void (*command)(struct net_device *dev, int cmd);
+		int (*status)(struct net_device *dev);
+		void (*intmask)(struct net_device *dev, int mask);
+		int (*reset)(struct net_device *dev, int really_reset);
+		void (*open)(struct net_device *dev);
+		void (*close)(struct net_device *dev);
 
-		void (*copy_to_card) (struct net_device * dev, int bufnum, int offset,
-				      void *buf, int count);
-		void (*copy_from_card) (struct net_device * dev, int bufnum, int offset,
-					void *buf, int count);
+		void (*copy_to_card)(struct net_device *dev, int bufnum,
+				     int offset, void *buf, int count);
+		void (*copy_from_card)(struct net_device *dev, int bufnum,
+				       int offset, void *buf, int count);
 	} hw;
 
 	void __iomem *mem_start;	/* pointer to ioremap'ed MMIO */
 };
 
-
-#define ARCRESET(x)  (lp->hw.reset(dev, (x)))
-#define ACOMMAND(x)  (lp->hw.command(dev, (x)))
-#define ASTATUS()    (lp->hw.status(dev))
-#define AINTMASK(x)  (lp->hw.intmask(dev, (x)))
-
-
-
 #if ARCNET_DEBUG_MAX & D_SKB
 void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
 #else
-#define arcnet_dump_skb(dev,skb,desc) ;
+static inline
+void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc)
+{
+}
 #endif
 
 void arcnet_unregister_proto(struct ArcProto *proto);
@@ -335,8 +335,34 @@
 int arcnet_open(struct net_device *dev);
 int arcnet_close(struct net_device *dev);
 netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
-				     struct net_device *dev);
+			       struct net_device *dev);
 void arcnet_timeout(struct net_device *dev);
 
+/* I/O equivalents */
+
+#ifdef CONFIG_SA1100_CT6001
+#define BUS_ALIGN  2  /* 8 bit device on a 16 bit bus - needs padding */
+#else
+#define BUS_ALIGN  1
+#endif
+
+/* addr and offset allow register-like names to define the actual I/O address.
+ * A configuration option multiplies the offset for alignment.
+ */
+#define arcnet_inb(addr, offset)					\
+	inb((addr) + BUS_ALIGN * (offset))
+#define arcnet_outb(value, addr, offset)				\
+	outb(value, (addr) + BUS_ALIGN * (offset))
+
+#define arcnet_insb(addr, offset, buffer, count)			\
+	insb((addr) + BUS_ALIGN * (offset), buffer, count)
+#define arcnet_outsb(addr, offset, buffer, count)			\
+	outsb((addr) + BUS_ALIGN * (offset), buffer, count)
+
+#define arcnet_readb(addr, offset)					\
+	readb((addr) + (offset))
+#define arcnet_writeb(value, addr, offset)				\
+	writeb(value, (addr) + (offset))
+
 #endif				/* __KERNEL__ */
 #endif				/* _LINUX_ARCDEVICE_H */
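
The header now provides expression-style debug helpers (BUGLVL() as a plain
condition, arc_printk() in place of BUGMSG(), a do/while TIME() wrapper) plus
the arcnet_inb/outb/readb/writeb accessors. A short sketch of their use in a
hardware copy routine follows; the demo_* function and its buffer layout are
illustrative assumptions:

#include <linux/io.h>
#include <linux/netdevice.h>

#include "arcdevice.h"

static void demo_copy_to_card(struct net_device *dev, int bufnum, int offset,
			      void *buf, int count)
{
	struct arcnet_local *lp = netdev_priv(dev);
	void __iomem *memaddr = lp->mem_start + bufnum * 512 + offset;

	/* emitted as netdev_warn/info/dbg depending on the debug level */
	arc_printk(D_DURING, dev, "copying %d bytes to buffer %d\n",
		   count, bufnum);

	/* TIME() wraps the call and reports cycle counts when D_TIMING is set */
	TIME(dev, "memcpy_toio", count, memcpy_toio(memaddr, buf, count));
}
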
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 10f71c73..e41dd36 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -1,6 +1,6 @@
 /*
  * Linux ARCnet driver - device-independent routines
- * 
+ *
  * Written 1997 by David Woodhouse.
  * Written 1994-1999 by Avery Pennarun.
  * Written 1999-2000 by Martin Mares <mj@ucw.cz>.
@@ -20,12 +20,12 @@
  * modified by SRC, incorporated herein by reference.
  *
  * **********************
- * 
+ *
  * The change log is now in a file called ChangeLog in this directory.
  *
  * Sources:
  *  - Crynwr arcnet.com/arcether.com packet drivers.
- *  - arcnet.c v0.00 dated 1/1/94 and apparently by 
+ *  - arcnet.c v0.00 dated 1/1/94 and apparently by
  *     Donald Becker - it didn't work :)
  *  - skeleton.c v0.05 dated 11/16/93 by Donald Becker
  *     (from Linux Kernel 1.1.45)
@@ -41,7 +41,7 @@
  *     <jojo@repas.de>
  */
 
-#define VERSION "arcnet: v3.94 BETA 2007/02/08 - by Avery Pennarun et al.\n"
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
 #include <linux/types.h>
@@ -50,9 +50,11 @@
 #include <linux/if_arp.h>
 #include <net/arp.h>
 #include <linux/init.h>
-#include <linux/arcdevice.h>
 #include <linux/jiffies.h>
 
+#include "arcdevice.h"
+#include "com9026.h"
+
 /* "do nothing" functions for protocol drivers */
 static void null_rx(struct net_device *dev, int bufnum,
 		    struct archdr *pkthdr, int length);
@@ -63,17 +65,24 @@
 
 static void arcnet_rx(struct net_device *dev, int bufnum);
 
-/*
- * one ArcProto per possible proto ID.  None of the elements of
+/* one ArcProto per possible proto ID.  None of the elements of
  * arc_proto_map are allowed to be NULL; they will get set to
  * arc_proto_default instead.  It also must not be NULL; if you would like
  * to set it to NULL, set it to &arc_proto_null instead.
  */
- struct ArcProto *arc_proto_map[256], *arc_proto_default,
-   *arc_bcast_proto, *arc_raw_proto;
+struct ArcProto *arc_proto_map[256];
+EXPORT_SYMBOL(arc_proto_map);
 
-static struct ArcProto arc_proto_null =
-{
+struct ArcProto *arc_proto_default;
+EXPORT_SYMBOL(arc_proto_default);
+
+struct ArcProto *arc_bcast_proto;
+EXPORT_SYMBOL(arc_bcast_proto);
+
+struct ArcProto *arc_raw_proto;
+EXPORT_SYMBOL(arc_raw_proto);
+
+static struct ArcProto arc_proto_null = {
 	.suffix		= '?',
 	.mtu		= XMTU,
 	.is_ip          = 0,
@@ -86,19 +95,7 @@
 
 /* Exported function prototypes */
 int arcnet_debug = ARCNET_DEBUG;
-
-EXPORT_SYMBOL(arc_proto_map);
-EXPORT_SYMBOL(arc_proto_default);
-EXPORT_SYMBOL(arc_bcast_proto);
-EXPORT_SYMBOL(arc_raw_proto);
-EXPORT_SYMBOL(arcnet_unregister_proto);
 EXPORT_SYMBOL(arcnet_debug);
-EXPORT_SYMBOL(alloc_arcdev);
-EXPORT_SYMBOL(arcnet_interrupt);
-EXPORT_SYMBOL(arcnet_open);
-EXPORT_SYMBOL(arcnet_close);
-EXPORT_SYMBOL(arcnet_send_packet);
-EXPORT_SYMBOL(arcnet_timeout);
 
 /* Internal function prototypes */
 static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
@@ -116,29 +113,20 @@
 
 	arcnet_debug = debug;
 
-	printk("arcnet loaded.\n");
-
-#ifdef ALPHA_WARNING
-	BUGLVL(D_EXTRA) {
-		printk("arcnet: ***\n"
-		"arcnet: * Read arcnet.txt for important release notes!\n"
-		       "arcnet: *\n"
-		       "arcnet: * This is an ALPHA version! (Last stable release: v3.02)  E-mail\n"
-		       "arcnet: * me if you have any questions, comments, or bug reports.\n"
-		       "arcnet: ***\n");
-	}
-#endif
+	pr_info("arcnet loaded\n");
 
 	/* initialize the protocol map */
 	arc_raw_proto = arc_proto_default = arc_bcast_proto = &arc_proto_null;
 	for (count = 0; count < 256; count++)
 		arc_proto_map[count] = arc_proto_default;
 
-	BUGLVL(D_DURING)
-	    printk("arcnet: struct sizes: %Zd %Zd %Zd %Zd %Zd\n",
-		 sizeof(struct arc_hardware), sizeof(struct arc_rfc1201),
-		sizeof(struct arc_rfc1051), sizeof(struct arc_eth_encap),
-		   sizeof(struct archdr));
+	if (BUGLVL(D_DURING))
+		pr_info("struct sizes: %Zd %Zd %Zd %Zd %Zd\n",
+			sizeof(struct arc_hardware),
+			sizeof(struct arc_rfc1201),
+			sizeof(struct arc_rfc1051),
+			sizeof(struct arc_eth_encap),
+			sizeof(struct archdr));
 
 	return 0;
 }
@@ -150,9 +138,7 @@
 module_init(arcnet_init);
 module_exit(arcnet_exit);
 
-/*
- * Dump the contents of an sk_buff
- */
+/* Dump the contents of an sk_buff */
 #if ARCNET_DEBUG_MAX & D_SKB
 void arcnet_dump_skb(struct net_device *dev,
 		     struct sk_buff *skb, char *desc)
@@ -164,14 +150,10 @@
 	print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET,
 		       16, 1, skb->data, skb->len, true);
 }
-
 EXPORT_SYMBOL(arcnet_dump_skb);
 #endif
 
-
-/*
- * Dump the contents of an ARCnet buffer
- */
+/* Dump the contents of an ARCnet buffer */
 #if (ARCNET_DEBUG_MAX & (D_RX | D_TX))
 static void arcnet_dump_packet(struct net_device *dev, int bufnum,
 			       char *desc, int take_arcnet_lock)
@@ -183,12 +165,13 @@
 	char hdr[32];
 
 	/* hw.copy_from_card expects IRQ context so take the IRQ lock
-	   to keep it single threaded */
-	if(take_arcnet_lock)
+	 * to keep it single threaded
+	 */
+	if (take_arcnet_lock)
 		spin_lock_irqsave(&lp->lock, flags);
 
 	lp->hw.copy_from_card(dev, bufnum, 0, buf, 512);
-	if(take_arcnet_lock)
+	if (take_arcnet_lock)
 		spin_unlock_irqrestore(&lp->lock, flags);
 
 	/* if the offset[0] byte is nonzero, this is a 256-byte packet */
@@ -202,13 +185,11 @@
 
 #else
 
-#define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) do { } while (0)
+#define arcnet_dump_packet(dev, bufnum, desc, take_arcnet_lock) do { } while (0)
 
 #endif
 
-
-/*
- * Unregister a protocol driver from the arc_proto_map.  Protocol drivers
+/* Unregister a protocol driver from the arc_proto_map.  Protocol drivers
  * are responsible for registering themselves, but the unregister routine
  * is pretty generic so we'll do it here.
  */
@@ -228,12 +209,11 @@
 			arc_proto_map[count] = arc_proto_default;
 	}
 }
+EXPORT_SYMBOL(arcnet_unregister_proto);
 
-
-/*
- * Add a buffer to the queue.  Only the interrupt handler is allowed to do
+/* Add a buffer to the queue.  Only the interrupt handler is allowed to do
  * this, unless interrupts are disabled.
- * 
+ *
  * Note: we don't check for a full queue, since there aren't enough buffers
  * to more than fill it.
  */
@@ -245,19 +225,17 @@
 	lp->buf_queue[lp->first_free_buf++] = bufnum;
 	lp->first_free_buf %= 5;
 
-	BUGLVL(D_DURING) {
-		BUGMSG(D_DURING, "release_arcbuf: freed #%d; buffer queue is now: ",
-		       bufnum);
-		for (i = lp->next_buf; i != lp->first_free_buf; i = (i+1) % 5)
-			BUGMSG2(D_DURING, "#%d ", lp->buf_queue[i]);
-		BUGMSG2(D_DURING, "\n");
+	if (BUGLVL(D_DURING)) {
+		arc_printk(D_DURING, dev, "release_arcbuf: freed #%d; buffer queue is now: ",
+			   bufnum);
+		for (i = lp->next_buf; i != lp->first_free_buf; i = (i + 1) % 5)
+			arc_cont(D_DURING, "#%d ", lp->buf_queue[i]);
+		arc_cont(D_DURING, "\n");
 	}
 }
 
-
-/*
- * Get a buffer from the queue.  If this returns -1, there are no buffers
- * available.
+/* Get a buffer from the queue.
+ * If this returns -1, there are no buffers available.
  */
 static int get_arcbuf(struct net_device *dev)
 {
@@ -266,34 +244,32 @@
 
 	if (!atomic_dec_and_test(&lp->buf_lock)) {
 		/* already in this function */
-		BUGMSG(D_NORMAL, "get_arcbuf: overlap (%d)!\n",
-		       lp->buf_lock.counter);
-	}
-	else {			/* we can continue */
+		arc_printk(D_NORMAL, dev, "get_arcbuf: overlap (%d)!\n",
+			   lp->buf_lock.counter);
+	} else {			/* we can continue */
 		if (lp->next_buf >= 5)
 			lp->next_buf -= 5;
 
-		if (lp->next_buf == lp->first_free_buf)
-			BUGMSG(D_NORMAL, "get_arcbuf: BUG: no buffers are available??\n");
-		else {
+		if (lp->next_buf == lp->first_free_buf) {
+			arc_printk(D_NORMAL, dev, "get_arcbuf: BUG: no buffers are available??\n");
+		} else {
 			buf = lp->buf_queue[lp->next_buf++];
 			lp->next_buf %= 5;
 		}
 	}
 
-
-	BUGLVL(D_DURING) {
-		BUGMSG(D_DURING, "get_arcbuf: got #%d; buffer queue is now: ", buf);
-		for (i = lp->next_buf; i != lp->first_free_buf; i = (i+1) % 5)
-			BUGMSG2(D_DURING, "#%d ", lp->buf_queue[i]);
-		BUGMSG2(D_DURING, "\n");
+	if (BUGLVL(D_DURING)) {
+		arc_printk(D_DURING, dev, "get_arcbuf: got #%d; buffer queue is now: ",
+			   buf);
+		for (i = lp->next_buf; i != lp->first_free_buf; i = (i + 1) % 5)
+			arc_cont(D_DURING, "#%d ", lp->buf_queue[i]);
+		arc_cont(D_DURING, "\n");
 	}
 
 	atomic_inc(&lp->buf_lock);
 	return buf;
 }
 
-
 static int choose_mtu(void)
 {
 	int count, mtu = 65535;
@@ -326,7 +302,7 @@
 	dev->type = ARPHRD_ARCNET;
 	dev->netdev_ops = &arcnet_netdev_ops;
 	dev->header_ops = &arcnet_header_ops;
-	dev->hard_header_len = sizeof(struct archdr);
+	dev->hard_header_len = sizeof(struct arc_hardware);
 	dev->mtu = choose_mtu();
 
 	dev->addr_len = ARCNET_ALEN;
@@ -336,7 +312,6 @@
 
 	/* New-style flags. */
 	dev->flags = IFF_BROADCAST;
-
 }
 
 struct net_device *alloc_arcdev(const char *name)
@@ -346,16 +321,17 @@
 	dev = alloc_netdev(sizeof(struct arcnet_local),
 			   name && *name ? name : "arc%d", NET_NAME_UNKNOWN,
 			   arcdev_setup);
-	if(dev) {
+	if (dev) {
 		struct arcnet_local *lp = netdev_priv(dev);
+
 		spin_lock_init(&lp->lock);
 	}
 
 	return dev;
 }
+EXPORT_SYMBOL(alloc_arcdev);
 
-/*
- * Open/initialize the board.  This is called sometime after booting when
+/* Open/initialize the board.  This is called sometime after booting when
  * the 'ifconfig' program is run.
  *
  * This routine should set everything up anew at each open, even registers
@@ -367,34 +343,33 @@
 	struct arcnet_local *lp = netdev_priv(dev);
 	int count, newmtu, error;
 
-	BUGMSG(D_INIT,"opened.");
+	arc_printk(D_INIT, dev, "opened.");
 
 	if (!try_module_get(lp->hw.owner))
 		return -ENODEV;
 
-	BUGLVL(D_PROTO) {
-		BUGMSG(D_PROTO, "protocol map (default is '%c'): ",
-		       arc_proto_default->suffix);
+	if (BUGLVL(D_PROTO)) {
+		arc_printk(D_PROTO, dev, "protocol map (default is '%c'): ",
+			   arc_proto_default->suffix);
 		for (count = 0; count < 256; count++)
-			BUGMSG2(D_PROTO, "%c", arc_proto_map[count]->suffix);
-		BUGMSG2(D_PROTO, "\n");
+			arc_cont(D_PROTO, "%c", arc_proto_map[count]->suffix);
+		arc_cont(D_PROTO, "\n");
 	}
 
-
-	BUGMSG(D_INIT, "arcnet_open: resetting card.\n");
+	arc_printk(D_INIT, dev, "arcnet_open: resetting card.\n");
 
 	/* try to put the card in a defined state - if it fails the first
 	 * time, actually reset it.
 	 */
 	error = -ENODEV;
-	if (ARCRESET(0) && ARCRESET(1))
+	if (lp->hw.reset(dev, 0) && lp->hw.reset(dev, 1))
 		goto out_module_put;
 
 	newmtu = choose_mtu();
 	if (newmtu < dev->mtu)
 		dev->mtu = newmtu;
 
-	BUGMSG(D_INIT, "arcnet_open: mtu: %d.\n", dev->mtu);
+	arc_printk(D_INIT, dev, "arcnet_open: mtu: %d.\n", dev->mtu);
 
 	/* autodetect the encapsulation for each host. */
 	memset(lp->default_proto, 0, sizeof(lp->default_proto));
@@ -425,30 +400,28 @@
 		lp->hw.open(dev);
 
 	if (dev->dev_addr[0] == 0)
-		BUGMSG(D_NORMAL, "WARNING!  Station address 00 is reserved "
-		       "for broadcasts!\n");
+		arc_printk(D_NORMAL, dev, "WARNING!  Station address 00 is reserved for broadcasts!\n");
 	else if (dev->dev_addr[0] == 255)
-		BUGMSG(D_NORMAL, "WARNING!  Station address FF may confuse "
-		       "DOS networking programs!\n");
+		arc_printk(D_NORMAL, dev, "WARNING!  Station address FF may confuse DOS networking programs!\n");
 
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
-	if (ASTATUS() & RESETflag) {
-	  	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
-		ACOMMAND(CFLAGScmd | RESETclear);
+	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
+	if (lp->hw.status(dev) & RESETflag) {
+		arc_printk(D_DEBUG, dev, "%s: %d: %s\n",
+			   __FILE__, __LINE__, __func__);
+		lp->hw.command(dev, CFLAGScmd | RESETclear);
 	}
 
-
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
+	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
 	/* make sure we're ready to receive IRQ's. */
-	AINTMASK(0);
+	lp->hw.intmask(dev, 0);
 	udelay(1);		/* give it time to set the mask before
 				 * we reset it again. (may not even be
 				 * necessary)
 				 */
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
+	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
 	lp->intmask = NORXflag | RECONflag;
-	AINTMASK(lp->intmask);
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
+	lp->hw.intmask(dev, lp->intmask);
+	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
 
 	netif_start_queue(dev);
 
@@ -458,7 +431,7 @@
 	module_put(lp->hw.owner);
 	return error;
 }
-
+EXPORT_SYMBOL(arcnet_open);
 
 /* The inverse routine to arcnet_open - shuts down the card. */
 int arcnet_close(struct net_device *dev)
@@ -468,9 +441,9 @@
 	netif_stop_queue(dev);
 
 	/* flush TX and disable RX */
-	AINTMASK(0);
-	ACOMMAND(NOTXcmd);	/* stop transmit */
-	ACOMMAND(NORXcmd);	/* disable receive */
+	lp->hw.intmask(dev, 0);
+	lp->hw.command(dev, NOTXcmd);	/* stop transmit */
+	lp->hw.command(dev, NORXcmd);	/* disable receive */
 	mdelay(1);
 
 	/* shut down the card */
@@ -478,7 +451,7 @@
 	module_put(lp->hw.owner);
 	return 0;
 }
-
+EXPORT_SYMBOL(arcnet_close);
 
 static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
 			 unsigned short type, const void *daddr,
@@ -488,48 +461,44 @@
 	uint8_t _daddr, proto_num;
 	struct ArcProto *proto;
 
-	BUGMSG(D_DURING,
-	    "create header from %d to %d; protocol %d (%Xh); size %u.\n",
-	       saddr ? *(uint8_t *) saddr : -1,
-	       daddr ? *(uint8_t *) daddr : -1,
-	       type, type, len);
+	arc_printk(D_DURING, dev,
+		   "create header from %d to %d; protocol %d (%Xh); size %u.\n",
+		   saddr ? *(uint8_t *)saddr : -1,
+		   daddr ? *(uint8_t *)daddr : -1,
+		   type, type, len);
 
-	if (skb->len!=0 && len != skb->len)
-		BUGMSG(D_NORMAL, "arcnet_header: Yikes!  skb->len(%d) != len(%d)!\n",
-		       skb->len, len);
+	if (skb->len != 0 && len != skb->len)
+		arc_printk(D_NORMAL, dev, "arcnet_header: Yikes!  skb->len(%d) != len(%d)!\n",
+			   skb->len, len);
 
-
-  	/* Type is host order - ? */
-  	if(type == ETH_P_ARCNET) {
-  		proto = arc_raw_proto;
-  		BUGMSG(D_DEBUG, "arc_raw_proto used. proto='%c'\n",proto->suffix);
-  		_daddr = daddr ? *(uint8_t *) daddr : 0;
-  	}
-	else if (!daddr) {
-		/*
-		 * if the dest addr isn't provided, we can't choose an encapsulation!
-		 * Store the packet type (eg. ETH_P_IP) for now, and we'll push on a
-		 * real header when we do rebuild_header.
+	/* Type is host order - ? */
+	if (type == ETH_P_ARCNET) {
+		proto = arc_raw_proto;
+		arc_printk(D_DEBUG, dev, "arc_raw_proto used. proto='%c'\n",
+			   proto->suffix);
+		_daddr = daddr ? *(uint8_t *)daddr : 0;
+	} else if (!daddr) {
+		/* if the dest addr isn't provided, we can't choose an
+		 * encapsulation!  Store the packet type (eg. ETH_P_IP)
+		 * for now, and we'll push on a real header when we do
+		 * rebuild_header.
 		 */
-		*(uint16_t *) skb_push(skb, 2) = type;
-		/*
-		 * XXX: Why not use skb->mac_len?
-		 */
+		*(uint16_t *)skb_push(skb, 2) = type;
+		/* XXX: Why not use skb->mac_len? */
 		if (skb->network_header - skb->mac_header != 2)
-			BUGMSG(D_NORMAL, "arcnet_header: Yikes!  diff (%d) is not 2!\n",
-			       (int)(skb->network_header - skb->mac_header));
+			arc_printk(D_NORMAL, dev, "arcnet_header: Yikes!  diff (%u) is not 2!\n",
+				   skb->network_header - skb->mac_header);
 		return -2;	/* return error -- can't transmit yet! */
-	}
-	else {
+	} else {
 		/* otherwise, we can just add the header as usual. */
-		_daddr = *(uint8_t *) daddr;
+		_daddr = *(uint8_t *)daddr;
 		proto_num = lp->default_proto[_daddr];
 		proto = arc_proto_map[proto_num];
-		BUGMSG(D_DURING, "building header for %02Xh using protocol '%c'\n",
-		       proto_num, proto->suffix);
+		arc_printk(D_DURING, dev, "building header for %02Xh using protocol '%c'\n",
+			   proto_num, proto->suffix);
 		if (proto == &arc_proto_null && arc_bcast_proto != proto) {
-			BUGMSG(D_DURING, "actually, let's use '%c' instead.\n",
-			       arc_bcast_proto->suffix);
+			arc_printk(D_DURING, dev, "actually, let's use '%c' instead.\n",
+				   arc_bcast_proto->suffix);
 			proto = arc_bcast_proto;
 		}
 	}
@@ -538,7 +507,7 @@
 
 /* Called by the kernel in order to transmit a packet. */
 netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
-				     struct net_device *dev)
+			       struct net_device *dev)
 {
 	struct arcnet_local *lp = netdev_priv(dev);
 	struct archdr *pkt;
@@ -548,21 +517,22 @@
 	unsigned long flags;
 	int freeskb, retval;
 
-	BUGMSG(D_DURING,
-	       "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n",
-	       ASTATUS(), lp->cur_tx, lp->next_tx, skb->len,skb->protocol);
+	arc_printk(D_DURING, dev,
+		   "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n",
+		   lp->hw.status(dev), lp->cur_tx, lp->next_tx, skb->len, skb->protocol);
 
-	pkt = (struct archdr *) skb->data;
+	pkt = (struct archdr *)skb->data;
 	soft = &pkt->soft.rfc1201;
 	proto = arc_proto_map[soft->proto];
 
-	BUGMSG(D_SKB_SIZE, "skb: transmitting %d bytes to %02X\n",
-		skb->len, pkt->hard.dest);
-	BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "tx");
+	arc_printk(D_SKB_SIZE, dev, "skb: transmitting %d bytes to %02X\n",
+		   skb->len, pkt->hard.dest);
+	if (BUGLVL(D_SKB))
+		arcnet_dump_skb(dev, skb, "tx");
 
 	/* fits in one packet? */
 	if (skb->len - ARC_HDR_SIZE > XMTU && !proto->continue_tx) {
-		BUGMSG(D_NORMAL, "fixme: packet too large: compensating badly!\n");
+		arc_printk(D_NORMAL, dev, "fixme: packet too large: compensating badly!\n");
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;	/* don't try again */
 	}
@@ -571,17 +541,18 @@
 	netif_stop_queue(dev);
 
 	spin_lock_irqsave(&lp->lock, flags);
-	AINTMASK(0);
-	if(lp->next_tx == -1)
+	lp->hw.intmask(dev, 0);
+	if (lp->next_tx == -1)
 		txbuf = get_arcbuf(dev);
-	else {
+	else
 		txbuf = -1;
-	}
+
 	if (txbuf != -1) {
 		if (proto->prepare_tx(dev, pkt, skb->len, txbuf) &&
 		    !proto->ack_tx) {
 			/* done right away and we don't want to acknowledge
-			   the package later - forget about it now */
+			 *  the package later - forget about it now
+			 * the packet later - forget about it now
 			dev->stats.tx_bytes += skb->len;
 			freeskb = 1;
 		} else {
@@ -594,9 +565,9 @@
 
 			if (proto->continue_tx &&
 			    proto->continue_tx(dev, txbuf)) {
-			  BUGMSG(D_NORMAL,
-				 "bug! continue_tx finished the first time! "
-				 "(proto='%c')\n", proto->suffix);
+				arc_printk(D_NORMAL, dev,
+					   "bug! continue_tx finished the first time! (proto='%c')\n",
+					   proto->suffix);
 			}
 		}
 		retval = NETDEV_TX_OK;
@@ -606,61 +577,62 @@
 		freeskb = 0;
 	}
 
-	BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS());
+	arc_printk(D_DEBUG, dev, "%s: %d: %s, status: %x\n",
+		   __FILE__, __LINE__, __func__, lp->hw.status(dev));
 	/* make sure we didn't ignore a TX IRQ while we were in here */
-	AINTMASK(0);
+	lp->hw.intmask(dev, 0);
 
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
-	lp->intmask |= TXFREEflag|EXCNAKflag;
-	AINTMASK(lp->intmask);
-	BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS());
+	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
+	lp->intmask |= TXFREEflag | EXCNAKflag;
+	lp->hw.intmask(dev, lp->intmask);
+	arc_printk(D_DEBUG, dev, "%s: %d: %s, status: %x\n",
+		   __FILE__, __LINE__, __func__, lp->hw.status(dev));
 
 	spin_unlock_irqrestore(&lp->lock, flags);
-	if (freeskb) {
+	if (freeskb)
 		dev_kfree_skb(skb);
-	}
+
 	return retval;		/* no need to try again */
 }
+EXPORT_SYMBOL(arcnet_send_packet);
 
-
-/*
- * Actually start transmitting a packet that was loaded into a buffer
+/* Actually start transmitting a packet that was loaded into a buffer
  * by prepare_tx.  This should _only_ be called by the interrupt handler.
  */
 static int go_tx(struct net_device *dev)
 {
 	struct arcnet_local *lp = netdev_priv(dev);
 
-	BUGMSG(D_DURING, "go_tx: status=%Xh, intmask=%Xh, next_tx=%d, cur_tx=%d\n",
-	       ASTATUS(), lp->intmask, lp->next_tx, lp->cur_tx);
+	arc_printk(D_DURING, dev, "go_tx: status=%Xh, intmask=%Xh, next_tx=%d, cur_tx=%d\n",
+		   lp->hw.status(dev), lp->intmask, lp->next_tx, lp->cur_tx);
 
 	if (lp->cur_tx != -1 || lp->next_tx == -1)
 		return 0;
 
-	BUGLVL(D_TX) arcnet_dump_packet(dev, lp->next_tx, "go_tx", 0);
+	if (BUGLVL(D_TX))
+		arcnet_dump_packet(dev, lp->next_tx, "go_tx", 0);
 
 	lp->cur_tx = lp->next_tx;
 	lp->next_tx = -1;
 
 	/* start sending */
-	ACOMMAND(TXcmd | (lp->cur_tx << 3));
+	lp->hw.command(dev, TXcmd | (lp->cur_tx << 3));
 
 	dev->stats.tx_packets++;
 	lp->lasttrans_dest = lp->lastload_dest;
 	lp->lastload_dest = 0;
 	lp->excnak_pending = 0;
-	lp->intmask |= TXFREEflag|EXCNAKflag;
+	lp->intmask |= TXFREEflag | EXCNAKflag;
 
 	return 1;
 }
 
-
 /* Called by the kernel when transmit times out */
 void arcnet_timeout(struct net_device *dev)
 {
 	unsigned long flags;
 	struct arcnet_local *lp = netdev_priv(dev);
-	int status = ASTATUS();
+	int status = lp->hw.status(dev);
 	char *msg;
 
 	spin_lock_irqsave(&lp->lock, flags);
@@ -670,30 +642,29 @@
 		msg = "";
 		dev->stats.tx_aborted_errors++;
 		lp->timed_out = 1;
-		ACOMMAND(NOTXcmd | (lp->cur_tx << 3));
+		lp->hw.command(dev, NOTXcmd | (lp->cur_tx << 3));
 	}
 	dev->stats.tx_errors++;
 
 	/* make sure we didn't miss a TX or a EXC NAK IRQ */
-	AINTMASK(0);
-	lp->intmask |= TXFREEflag|EXCNAKflag;
-	AINTMASK(lp->intmask);
-	
+	lp->hw.intmask(dev, 0);
+	lp->intmask |= TXFREEflag | EXCNAKflag;
+	lp->hw.intmask(dev, lp->intmask);
+
 	spin_unlock_irqrestore(&lp->lock, flags);
 
-	if (time_after(jiffies, lp->last_timeout + 10*HZ)) {
-		BUGMSG(D_EXTRA, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n",
-		       msg, status, lp->intmask, lp->lasttrans_dest);
+	if (time_after(jiffies, lp->last_timeout + 10 * HZ)) {
+		arc_printk(D_EXTRA, dev, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n",
+			   msg, status, lp->intmask, lp->lasttrans_dest);
 		lp->last_timeout = jiffies;
 	}
 
 	if (lp->cur_tx == -1)
 		netif_wake_queue(dev);
 }
+EXPORT_SYMBOL(arcnet_timeout);
 
-
-/*
- * The typical workload of the driver: Handle the network interface
+/* The typical workload of the driver: Handle the network interface
  * interrupts. Establish which device needs attention, and call the correct
  * chipset interrupt handler.
  */
@@ -704,125 +675,125 @@
 	int recbuf, status, diagstatus, didsomething, boguscount;
 	int retval = IRQ_NONE;
 
-	BUGMSG(D_DURING, "\n");
+	arc_printk(D_DURING, dev, "\n");
 
-	BUGMSG(D_DURING, "in arcnet_interrupt\n");
+	arc_printk(D_DURING, dev, "in arcnet_interrupt\n");
 
 	lp = netdev_priv(dev);
 	BUG_ON(!lp);
-		
+
 	spin_lock(&lp->lock);
 
-	/*
-	 * RESET flag was enabled - if device is not running, we must clear it right
-	 * away (but nothing else).
+	/* RESET flag was enabled - if device is not running, we must
+	 * clear it right away (but nothing else).
 	 */
 	if (!netif_running(dev)) {
-		if (ASTATUS() & RESETflag)
-			ACOMMAND(CFLAGScmd | RESETclear);
-		AINTMASK(0);
+		if (lp->hw.status(dev) & RESETflag)
+			lp->hw.command(dev, CFLAGScmd | RESETclear);
+		lp->hw.intmask(dev, 0);
 		spin_unlock(&lp->lock);
 		return retval;
 	}
 
-	BUGMSG(D_DURING, "in arcnet_inthandler (status=%Xh, intmask=%Xh)\n",
-	       ASTATUS(), lp->intmask);
+	arc_printk(D_DURING, dev, "in arcnet_inthandler (status=%Xh, intmask=%Xh)\n",
+		   lp->hw.status(dev), lp->intmask);
 
 	boguscount = 5;
 	do {
-		status = ASTATUS();
-                diagstatus = (status >> 8) & 0xFF;
+		status = lp->hw.status(dev);
+		diagstatus = (status >> 8) & 0xFF;
 
-		BUGMSG(D_DEBUG, "%s: %d: %s: status=%x\n",
-			__FILE__,__LINE__,__func__,status);
+		arc_printk(D_DEBUG, dev, "%s: %d: %s: status=%x\n",
+			   __FILE__, __LINE__, __func__, status);
 		didsomething = 0;
 
-		/*
-		 * RESET flag was enabled - card is resetting and if RX is
+		/* RESET flag was enabled - card is resetting and if RX is
 		 * disabled, it's NOT because we just got a packet.
-		 * 
-		 * The card is in an undefined state.  Clear it out and start over.
+		 *
+		 * The card is in an undefined state.
+		 * Clear it out and start over.
 		 */
 		if (status & RESETflag) {
-			BUGMSG(D_NORMAL, "spurious reset (status=%Xh)\n", status);
+			arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n",
+				   status);
 			arcnet_close(dev);
 			arcnet_open(dev);
 
 			/* get out of the interrupt handler! */
 			break;
 		}
-		/* 
-		 * RX is inhibited - we must have received something. Prepare to
-		 * receive into the next buffer.
-		 * 
-		 * We don't actually copy the received packet from the card until
-		 * after the transmit handler runs (and possibly launches the next
-		 * tx); this should improve latency slightly if we get both types
-		 * of interrupts at once. 
+		/* RX is inhibited - we must have received something.
+		 * Prepare to receive into the next buffer.
+		 *
+		 * We don't actually copy the received packet from the card
+		 * until after the transmit handler runs (and possibly
+		 * launches the next tx); this should improve latency slightly
+		 * if we get both types of interrupts at once.
 		 */
 		recbuf = -1;
 		if (status & lp->intmask & NORXflag) {
 			recbuf = lp->cur_rx;
-			BUGMSG(D_DURING, "Buffer #%d: receive irq (status=%Xh)\n",
-			       recbuf, status);
+			arc_printk(D_DURING, dev, "Buffer #%d: receive irq (status=%Xh)\n",
+				   recbuf, status);
 
 			lp->cur_rx = get_arcbuf(dev);
 			if (lp->cur_rx != -1) {
-				BUGMSG(D_DURING, "enabling receive to buffer #%d\n",
-				       lp->cur_rx);
-				ACOMMAND(RXcmd | (lp->cur_rx << 3) | RXbcasts);
+				arc_printk(D_DURING, dev, "enabling receive to buffer #%d\n",
+					   lp->cur_rx);
+				lp->hw.command(dev, RXcmd | (lp->cur_rx << 3) | RXbcasts);
 			}
 			didsomething++;
 		}
 
-		if((diagstatus & EXCNAKflag)) {
-			BUGMSG(D_DURING, "EXCNAK IRQ (diagstat=%Xh)\n",
-			       diagstatus);
+		if ((diagstatus & EXCNAKflag)) {
+			arc_printk(D_DURING, dev, "EXCNAK IRQ (diagstat=%Xh)\n",
+				   diagstatus);
 
-                        ACOMMAND(NOTXcmd);      /* disable transmit */
-                        lp->excnak_pending = 1;
+			lp->hw.command(dev, NOTXcmd);      /* disable transmit */
+			lp->excnak_pending = 1;
 
-                        ACOMMAND(EXCNAKclear);
+			lp->hw.command(dev, EXCNAKclear);
 			lp->intmask &= ~(EXCNAKflag);
-                        didsomething++;
-                }
-
+			didsomething++;
+		}
 
 		/* a transmit finished, and we're interested in it. */
 		if ((status & lp->intmask & TXFREEflag) || lp->timed_out) {
-			lp->intmask &= ~(TXFREEflag|EXCNAKflag);
+			lp->intmask &= ~(TXFREEflag | EXCNAKflag);
 
-			BUGMSG(D_DURING, "TX IRQ (stat=%Xh)\n", status);
+			arc_printk(D_DURING, dev, "TX IRQ (stat=%Xh)\n",
+				   status);
 
 			if (lp->cur_tx != -1 && !lp->timed_out) {
-				if(!(status & TXACKflag)) {
+				if (!(status & TXACKflag)) {
 					if (lp->lasttrans_dest != 0) {
-						BUGMSG(D_EXTRA,
-						       "transmit was not acknowledged! "
-						       "(status=%Xh, dest=%02Xh)\n",
-						       status, lp->lasttrans_dest);
+						arc_printk(D_EXTRA, dev,
+							   "transmit was not acknowledged! (status=%Xh, dest=%02Xh)\n",
+							   status,
+							   lp->lasttrans_dest);
 						dev->stats.tx_errors++;
 						dev->stats.tx_carrier_errors++;
 					} else {
-						BUGMSG(D_DURING,
-						       "broadcast was not acknowledged; that's normal "
-						       "(status=%Xh, dest=%02Xh)\n",
-						       status, lp->lasttrans_dest);
+						arc_printk(D_DURING, dev,
+							   "broadcast was not acknowledged; that's normal (status=%Xh, dest=%02Xh)\n",
+							   status,
+							   lp->lasttrans_dest);
 					}
 				}
 
 				if (lp->outgoing.proto &&
 				    lp->outgoing.proto->ack_tx) {
-				  int ackstatus;
-				  if(status & TXACKflag)
-                                    ackstatus=2;
-                                  else if(lp->excnak_pending)
-                                    ackstatus=1;
-                                  else
-                                    ackstatus=0;
+					int ackstatus;
 
-                                  lp->outgoing.proto
-                                    ->ack_tx(dev, ackstatus);
+					if (status & TXACKflag)
+						ackstatus = 2;
+					else if (lp->excnak_pending)
+						ackstatus = 1;
+					else
+						ackstatus = 0;
+
+					lp->outgoing.proto
+						->ack_tx(dev, ackstatus);
 				}
 			}
 			if (lp->cur_tx != -1)
@@ -836,17 +807,18 @@
 			go_tx(dev);
 
 			/* continue a split packet, if any */
-			if (lp->outgoing.proto && lp->outgoing.proto->continue_tx) {
+			if (lp->outgoing.proto &&
+			    lp->outgoing.proto->continue_tx) {
 				int txbuf = get_arcbuf(dev);
+
 				if (txbuf != -1) {
 					if (lp->outgoing.proto->continue_tx(dev, txbuf)) {
 						/* that was the last segment */
 						dev->stats.tx_bytes += lp->outgoing.skb->len;
-						if(!lp->outgoing.proto->ack_tx)
-						  {
-						    dev_kfree_skb_irq(lp->outgoing.skb);
-						    lp->outgoing.proto = NULL;
-						  }
+						if (!lp->outgoing.proto->ack_tx) {
+							dev_kfree_skb_irq(lp->outgoing.skb);
+							lp->outgoing.proto = NULL;
+						}
 					}
 					lp->next_tx = txbuf;
 				}
@@ -857,7 +829,8 @@
 		}
 		/* now process the received packet, if any */
 		if (recbuf != -1) {
-			BUGLVL(D_RX) arcnet_dump_packet(dev, recbuf, "rx irq", 0);
+			if (BUGLVL(D_RX))
+				arcnet_dump_packet(dev, recbuf, "rx irq", 0);
 
 			arcnet_rx(dev, recbuf);
 			release_arcbuf(dev, recbuf);
@@ -865,32 +838,32 @@
 			didsomething++;
 		}
 		if (status & lp->intmask & RECONflag) {
-			ACOMMAND(CFLAGScmd | CONFIGclear);
+			lp->hw.command(dev, CFLAGScmd | CONFIGclear);
 			dev->stats.tx_carrier_errors++;
 
-			BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n",
-			       status);
+			arc_printk(D_RECON, dev, "Network reconfiguration detected (status=%Xh)\n",
+				   status);
 			/* MYRECON bit is at bit 7 of diagstatus */
-			if(diagstatus & 0x80)
-				BUGMSG(D_RECON,"Put out that recon myself\n");
+			if (diagstatus & 0x80)
+				arc_printk(D_RECON, dev, "Put out that recon myself\n");
 
 			/* is the RECON info empty or old? */
 			if (!lp->first_recon || !lp->last_recon ||
 			    time_after(jiffies, lp->last_recon + HZ * 10)) {
 				if (lp->network_down)
-					BUGMSG(D_NORMAL, "reconfiguration detected: cabling restored?\n");
+					arc_printk(D_NORMAL, dev, "reconfiguration detected: cabling restored?\n");
 				lp->first_recon = lp->last_recon = jiffies;
 				lp->num_recons = lp->network_down = 0;
 
-				BUGMSG(D_DURING, "recon: clearing counters.\n");
+				arc_printk(D_DURING, dev, "recon: clearing counters.\n");
 			} else {	/* add to current RECON counter */
 				lp->last_recon = jiffies;
 				lp->num_recons++;
 
-				BUGMSG(D_DURING, "recon: counter=%d, time=%lds, net=%d\n",
-				       lp->num_recons,
-				 (lp->last_recon - lp->first_recon) / HZ,
-				       lp->network_down);
+				arc_printk(D_DURING, dev, "recon: counter=%d, time=%lds, net=%d\n",
+					   lp->num_recons,
+					   (lp->last_recon - lp->first_recon) / HZ,
+					   lp->network_down);
 
 				/* if network is marked up;
 				 * and first_recon and last_recon are 60+ apart;
@@ -902,46 +875,44 @@
 				    (lp->last_recon - lp->first_recon) <= HZ * 60 &&
 				    lp->num_recons >= RECON_THRESHOLD) {
 					lp->network_down = 1;
-					BUGMSG(D_NORMAL, "many reconfigurations detected: cabling problem?\n");
+					arc_printk(D_NORMAL, dev, "many reconfigurations detected: cabling problem?\n");
 				} else if (!lp->network_down &&
 					   lp->last_recon - lp->first_recon > HZ * 60) {
-					/* reset counters if we've gone for over a minute. */
+					/* reset counters if we've gone for
+					 *  over a minute.
+					 */
 					lp->first_recon = lp->last_recon;
 					lp->num_recons = 1;
 				}
 			}
 		} else if (lp->network_down &&
-				time_after(jiffies, lp->last_recon + HZ * 10)) {
+			   time_after(jiffies, lp->last_recon + HZ * 10)) {
 			if (lp->network_down)
-				BUGMSG(D_NORMAL, "cabling restored?\n");
+				arc_printk(D_NORMAL, dev, "cabling restored?\n");
 			lp->first_recon = lp->last_recon = 0;
 			lp->num_recons = lp->network_down = 0;
 
-			BUGMSG(D_DURING, "not recon: clearing counters anyway.\n");
+			arc_printk(D_DURING, dev, "not recon: clearing counters anyway.\n");
 		}
 
-		if(didsomething) {
+		if (didsomething)
 			retval |= IRQ_HANDLED;
-		}
-	}
-	while (--boguscount && didsomething);
+	} while (--boguscount && didsomething);
 
-	BUGMSG(D_DURING, "arcnet_interrupt complete (status=%Xh, count=%d)\n",
-	       ASTATUS(), boguscount);
-	BUGMSG(D_DURING, "\n");
+	arc_printk(D_DURING, dev, "arcnet_interrupt complete (status=%Xh, count=%d)\n",
+		   lp->hw.status(dev), boguscount);
+	arc_printk(D_DURING, dev, "\n");
 
-
-	AINTMASK(0);
+	lp->hw.intmask(dev, 0);
 	udelay(1);
-	AINTMASK(lp->intmask);
-	
+	lp->hw.intmask(dev, lp->intmask);
+
 	spin_unlock(&lp->lock);
 	return retval;
 }
+EXPORT_SYMBOL(arcnet_interrupt);
 
-
-/*
- * This is a generic packet receiver that calls arcnet??_rx depending on the
+/* This is a generic packet receiver that calls arcnet??_rx depending on the
  * protocol ID found.
  */
 static void arcnet_rx(struct net_device *dev, int bufnum)
@@ -963,32 +934,31 @@
 	}
 
 	/* get the full header, if possible */
-	if (sizeof(pkt.soft) <= length)
+	if (sizeof(pkt.soft) <= length) {
 		lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft));
-	else {
+	} else {
 		memset(&pkt.soft, 0, sizeof(pkt.soft));
 		lp->hw.copy_from_card(dev, bufnum, ofs, soft, length);
 	}
 
-	BUGMSG(D_DURING, "Buffer #%d: received packet from %02Xh to %02Xh "
-	       "(%d+4 bytes)\n",
-	       bufnum, pkt.hard.source, pkt.hard.dest, length);
+	arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n",
+		   bufnum, pkt.hard.source, pkt.hard.dest, length);
 
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += length + ARC_HDR_SIZE;
 
 	/* call the right receiver for the protocol */
 	if (arc_proto_map[soft->proto]->is_ip) {
-		BUGLVL(D_PROTO) {
+		if (BUGLVL(D_PROTO)) {
 			struct ArcProto
 			*oldp = arc_proto_map[lp->default_proto[pkt.hard.source]],
 			*newp = arc_proto_map[soft->proto];
 
 			if (oldp != newp) {
-				BUGMSG(D_PROTO,
-				       "got protocol %02Xh; encap for host %02Xh is now '%c'"
-				       " (was '%c')\n", soft->proto, pkt.hard.source,
-				       newp->suffix, oldp->suffix);
+				arc_printk(D_PROTO, dev,
+					   "got protocol %02Xh; encap for host %02Xh is now '%c' (was '%c')\n",
+					   soft->proto, pkt.hard.source,
+					   newp->suffix, oldp->suffix);
 			}
 		}
 
@@ -1002,30 +972,27 @@
 	arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length);
 }
 
-
 static void null_rx(struct net_device *dev, int bufnum,
 		    struct archdr *pkthdr, int length)
 {
-	BUGMSG(D_PROTO,
-	"rx: don't know how to deal with proto %02Xh from host %02Xh.\n",
-	       pkthdr->soft.rfc1201.proto, pkthdr->hard.source);
+	arc_printk(D_PROTO, dev,
+		   "rx: don't know how to deal with proto %02Xh from host %02Xh.\n",
+		   pkthdr->soft.rfc1201.proto, pkthdr->hard.source);
 }
 
-
 static int null_build_header(struct sk_buff *skb, struct net_device *dev,
 			     unsigned short type, uint8_t daddr)
 {
 	struct arcnet_local *lp = netdev_priv(dev);
 
-	BUGMSG(D_PROTO,
-	       "tx: can't build header for encap %02Xh; load a protocol driver.\n",
-	       lp->default_proto[daddr]);
+	arc_printk(D_PROTO, dev,
+		   "tx: can't build header for encap %02Xh; load a protocol driver.\n",
+		   lp->default_proto[daddr]);
 
 	/* always fails */
 	return 0;
 }
 
-
 /* the "do nothing" prepare_tx function warns that there's nothing to do. */
 static int null_prepare_tx(struct net_device *dev, struct archdr *pkt,
 			   int length, int bufnum)
@@ -1033,7 +1000,7 @@
 	struct arcnet_local *lp = netdev_priv(dev);
 	struct arc_hardware newpkt;
 
-	BUGMSG(D_PROTO, "tx: no encap for this host; load a protocol driver.\n");
+	arc_printk(D_PROTO, dev, "tx: no encap for this host; load a protocol driver.\n");
 
 	/* send a packet to myself -- will never get received, of course */
 	newpkt.source = newpkt.dest = dev->dev_addr[0];
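
Two mechanical conversions recur throughout the arcnet.c hunks above: the old BUGMSG()/BUGLVL{} logging macros become arc_printk()/arc_cont() calls guarded by an ordinary if (BUGLVL(...)) condition, and the ASTATUS()/ACOMMAND()/AINTMASK() macros, which hid the lp/dev variables they relied on, are replaced by explicit calls through the per-device lp->hw callbacks. As a rough before/after sketch distilled directly from the hunks above (illustrative only, not an additional change in this series):

	/* old style: the macros hide which 'dev'/'lp' they touch */
	BUGMSG(D_DURING, "go_tx: status=%Xh\n", ASTATUS());
	BUGLVL(D_TX) arcnet_dump_packet(dev, lp->next_tx, "go_tx", 0);
	ACOMMAND(TXcmd | (lp->cur_tx << 3));
	AINTMASK(lp->intmask);

	/* new style: the device is explicit, BUGLVL() is a plain condition */
	arc_printk(D_DURING, dev, "go_tx: status=%Xh\n", lp->hw.status(dev));
	if (BUGLVL(D_TX))
		arcnet_dump_packet(dev, lp->next_tx, "go_tx", 0);
	lp->hw.command(dev, TXcmd | (lp->cur_tx << 3));
	lp->hw.intmask(dev, lp->intmask);
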
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 42fce91..2056878 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -26,6 +26,8 @@
  * **********************
  */
 
+#define pr_fmt(fmt) "arcnet:" KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/init.h>
@@ -33,9 +35,8 @@
 #include <net/arp.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
-#include <linux/arcdevice.h>
 
-#define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n"
+#include "arcdevice.h"
 
 /* packet receiver */
 static void rx(struct net_device *dev, int bufnum,
@@ -47,7 +48,8 @@
 	char *pktbuf, *pkthdrbuf;
 	int ofs;
 
-	BUGMSG(D_DURING, "it's a raw(cap) packet (length=%d)\n", length);
+	arc_printk(D_DURING, dev, "it's a raw(cap) packet (length=%d)\n",
+		   length);
 
 	if (length >= MinTU)
 		ofs = 512 - length;
@@ -55,8 +57,7 @@
 		ofs = 256 - length;
 
 	skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC);
-	if (skb == NULL) {
-		BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
+	if (!skb) {
 		dev->stats.rx_dropped++;
 		return;
 	}
@@ -66,17 +67,17 @@
 	pkt = (struct archdr *)skb_mac_header(skb);
 	skb_pull(skb, ARC_HDR_SIZE);
 
-	/* up to sizeof(pkt->soft) has already been copied from the card */
-	/* squeeze in an int for the cap encapsulation */
-
-	/* use these variables to be sure we count in bytes, not in
-	   sizeof(struct archdr) */
-	pktbuf=(char*)pkt;
-	pkthdrbuf=(char*)pkthdr;
-	memcpy(pktbuf, pkthdrbuf, ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto));
-	memcpy(pktbuf+ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto)+sizeof(int),
-	       pkthdrbuf+ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto),
-	       sizeof(struct archdr)-ARC_HDR_SIZE-sizeof(pkt->soft.cap.proto));
+	/* up to sizeof(pkt->soft) has already been copied from the card
+	 * squeeze in an int for the cap encapsulation
+	 * use these variables to be sure we count in bytes, not in
+	 * sizeof(struct archdr)
+	 */
+	pktbuf = (char *)pkt;
+	pkthdrbuf = (char *)pkthdr;
+	memcpy(pktbuf, pkthdrbuf, ARC_HDR_SIZE + sizeof(pkt->soft.cap.proto));
+	memcpy(pktbuf + ARC_HDR_SIZE + sizeof(pkt->soft.cap.proto) + sizeof(int),
+	       pkthdrbuf + ARC_HDR_SIZE + sizeof(pkt->soft.cap.proto),
+	       sizeof(struct archdr) - ARC_HDR_SIZE - sizeof(pkt->soft.cap.proto));
 
 	if (length > sizeof(pkt->soft))
 		lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
@@ -84,15 +85,14 @@
 				      + sizeof(int),
 				      length - sizeof(pkt->soft));
 
-	BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
+	if (BUGLVL(D_SKB))
+		arcnet_dump_skb(dev, skb, "rx");
 
 	skb->protocol = cpu_to_be16(ETH_P_ARCNET);
 	netif_rx(skb);
 }
 
-
-/*
- * Create the ARCnet hard/soft headers for cap mode.
+/* Create the ARCnet hard/soft headers for cap mode.
  * There aren't any soft headers in cap mode - not even the protocol id.
  */
 static int build_header(struct sk_buff *skb,
@@ -101,12 +101,12 @@
 			uint8_t daddr)
 {
 	int hdr_size = ARC_HDR_SIZE;
-	struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
+	struct archdr *pkt = (struct archdr *)skb_push(skb, hdr_size);
 
-	BUGMSG(D_PROTO, "Preparing header for cap packet %x.\n",
-	       *((int*)&pkt->soft.cap.cookie[0]));
-	/*
-	 * Set the source hardware address.
+	arc_printk(D_PROTO, dev, "Preparing header for cap packet %x.\n",
+		   *((int *)&pkt->soft.cap.cookie[0]));
+
+	/* Set the source hardware address.
 	 *
 	 * This is pretty pointless for most purposes, but it can help in
 	 * debugging.  ARCnet does not allow us to change the source address in
@@ -117,9 +117,8 @@
 	/* see linux/net/ethernet/eth.c to see where I got the following */
 
 	if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
-		/*
-		 * FIXME: fill in the last byte of the dest ipaddr here to better
-		 * comply with RFC1051 in "noarp" mode.
+		/* FIXME: fill in the last byte of the dest ipaddr here to
+		 * better comply with RFC1051 in "noarp" mode.
 		 */
 		pkt->hard.dest = 0;
 		return hdr_size;
@@ -130,7 +129,6 @@
 	return hdr_size;	/* success */
 }
 
-
 static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
 		      int bufnum)
 {
@@ -138,22 +136,21 @@
 	struct arc_hardware *hard = &pkt->hard;
 	int ofs;
 
-
 	/* hard header is not included in packet length */
 	length -= ARC_HDR_SIZE;
 	/* And neither is the cookie field */
 	length -= sizeof(int);
 
-	BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n",
-	       lp->next_tx, lp->cur_tx, bufnum);
+	arc_printk(D_DURING, dev, "prepare_tx: txbufs=%d/%d/%d\n",
+		   lp->next_tx, lp->cur_tx, bufnum);
 
-	BUGMSG(D_PROTO, "Sending for cap packet %x.\n",
-	       *((int*)&pkt->soft.cap.cookie[0]));
+	arc_printk(D_PROTO, dev, "Sending for cap packet %x.\n",
+		   *((int *)&pkt->soft.cap.cookie[0]));
 
 	if (length > XMTU) {
 		/* should never happen! other people already check for this. */
-		BUGMSG(D_NORMAL, "Bug!  prepare_tx with size %d (> %d)\n",
-		       length, XMTU);
+		arc_printk(D_NORMAL, dev, "Bug!  prepare_tx with size %d (> %d)\n",
+			   length, XMTU);
 		length = XMTU;
 	}
 	if (length > MinTU) {
@@ -162,11 +159,12 @@
 	} else if (length > MTU) {
 		hard->offset[0] = 0;
 		hard->offset[1] = ofs = 512 - length - 3;
-	} else
+	} else {
 		hard->offset[0] = ofs = 256 - length;
+	}
 
-	BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n",
-	       length,ofs);
+	arc_printk(D_DURING, dev, "prepare_tx: length=%d ofs=%d\n",
+		   length, ofs);
 
 	/* Copy the arcnet-header + the protocol byte down: */
 	lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
@@ -174,9 +172,10 @@
 			    sizeof(pkt->soft.cap.proto));
 
 	/* Skip the extra integer we have written into it as a cookie
-	   but write the rest of the message: */
-	lp->hw.copy_to_card(dev, bufnum, ofs+1,
-			    ((unsigned char*)&pkt->soft.cap.mes),length-1);
+	 * but write the rest of the message:
+	 */
+	lp->hw.copy_to_card(dev, bufnum, ofs + 1,
+			    ((unsigned char *)&pkt->soft.cap.mes), length - 1);
 
 	lp->lastload_dest = hard->dest;
 
@@ -188,21 +187,20 @@
 	struct arcnet_local *lp = netdev_priv(dev);
 	struct sk_buff *ackskb;
 	struct archdr *ackpkt;
-	int length=sizeof(struct arc_cap);
+	int length = sizeof(struct arc_cap);
 
-	BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n",
-		lp->outgoing.skb->protocol, acked);
+	arc_printk(D_DURING, dev, "capmode: ack_tx: protocol: %x: result: %d\n",
+		   lp->outgoing.skb->protocol, acked);
 
-	BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx");
+	if (BUGLVL(D_SKB))
+		arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx");
 
 	/* Now alloc a skb to send back up through the layers: */
-	ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC);
-	if (ackskb == NULL) {
-		BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n");
+	ackskb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
+	if (!ackskb)
 		goto free_outskb;
-	}
 
-	skb_put(ackskb, length + ARC_HDR_SIZE );
+	skb_put(ackskb, length + ARC_HDR_SIZE);
 	ackskb->dev = dev;
 
 	skb_reset_mac_header(ackskb);
@@ -212,39 +210,40 @@
 	skb_copy_from_linear_data(lp->outgoing.skb, ackpkt,
 				  ARC_HDR_SIZE + sizeof(struct arc_cap));
 	ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */
-	ackpkt->soft.cap.mes.ack=acked;
+	ackpkt->soft.cap.mes.ack = acked;
 
-	BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n",
-			*((int*)&ackpkt->soft.cap.cookie[0]));
+	arc_printk(D_PROTO, dev, "Acknowledge for cap packet %x.\n",
+		   *((int *)&ackpkt->soft.cap.cookie[0]));
 
 	ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
 
-	BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv");
+	if (BUGLVL(D_SKB))
+		arcnet_dump_skb(dev, ackskb, "ack_tx_recv");
 	netif_rx(ackskb);
 
 free_outskb:
 	dev_kfree_skb_irq(lp->outgoing.skb);
-	lp->outgoing.proto = NULL; /* We are always finished when in this protocol */
+	lp->outgoing.proto = NULL;
+			/* We are always finished when in this protocol */
 
 	return 0;
 }
 
-static struct ArcProto capmode_proto =
-{
-	'r',
-	XMTU,
-	0,
-	rx,
-	build_header,
-	prepare_tx,
-	NULL,
-	ack_tx
+static struct ArcProto capmode_proto = {
+	.suffix		= 'r',
+	.mtu		= XMTU,
+	.rx		= rx,
+	.build_header	= build_header,
+	.prepare_tx	= prepare_tx,
+	.ack_tx		= ack_tx
 };
 
-static void arcnet_cap_init(void)
+static int __init capmode_module_init(void)
 {
 	int count;
 
+	pr_info("cap mode (`c') encapsulation support loaded\n");
+
 	for (count = 1; count <= 8; count++)
 		if (arc_proto_map[count] == arc_proto_default)
 			arc_proto_map[count] = &capmode_proto;
@@ -255,12 +254,7 @@
 
 	arc_proto_default = &capmode_proto;
 	arc_raw_proto = &capmode_proto;
-}
 
-static int __init capmode_module_init(void)
-{
-	printk(VERSION);
-	arcnet_cap_init();
 	return 0;
 }
 
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 45c61a2..b9e9931 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -1,6 +1,6 @@
 /*
  * Linux ARCnet driver - COM20020 chipset support
- * 
+ *
  * Written 1997 by David Woodhouse.
  * Written 1994-1999 by Avery Pennarun.
  * Written 1999-2000 by Martin Mares <mj@ucw.cz>.
@@ -25,6 +25,9 @@
  *
  * **********************
  */
+
+#define pr_fmt(fmt) "arcnet:" KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
@@ -36,16 +39,12 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/bootmem.h>
-#include <linux/arcdevice.h>
-#include <linux/com20020.h>
+#include <linux/io.h>
 
-#include <asm/io.h>
+#include "arcdevice.h"
+#include "com20020.h"
 
-#define VERSION "arcnet: COM20020 ISA support (by David Woodhouse et al.)\n"
-
-
-/*
- * We cannot (yet) probe for an IO mapped card, although we can check that
+/* We cannot (yet) probe for an IO mapped card, although we can check that
  * it's where we were told it was, and even do autoirq.
  */
 static int __init com20020isa_probe(struct net_device *dev)
@@ -55,21 +54,21 @@
 	struct arcnet_local *lp = netdev_priv(dev);
 	int err;
 
-	BUGLVL(D_NORMAL) printk(VERSION);
+	if (BUGLVL(D_NORMAL))
+		pr_info("%s\n", "COM20020 ISA support (by David Woodhouse et al.)");
 
 	ioaddr = dev->base_addr;
 	if (!ioaddr) {
-		BUGMSG(D_NORMAL, "No autoprobe (yet) for IO mapped cards; you "
-		       "must specify the base address!\n");
+		arc_printk(D_NORMAL, dev, "No autoprobe (yet) for IO mapped cards; you must specify the base address!\n");
 		return -ENODEV;
 	}
 	if (!request_region(ioaddr, ARCNET_TOTAL_SIZE, "arcnet (COM20020)")) {
-		BUGMSG(D_NORMAL, "IO region %xh-%xh already allocated.\n",
-		       ioaddr, ioaddr + ARCNET_TOTAL_SIZE - 1);
+		arc_printk(D_NORMAL, dev, "IO region %xh-%xh already allocated.\n",
+			   ioaddr, ioaddr + ARCNET_TOTAL_SIZE - 1);
 		return -ENXIO;
 	}
-	if (ASTATUS() == 0xFF) {
-		BUGMSG(D_NORMAL, "IO address %x empty\n", ioaddr);
+	if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
+		arc_printk(D_NORMAL, dev, "IO address %x empty\n", ioaddr);
 		err = -ENODEV;
 		goto out;
 	}
@@ -83,23 +82,24 @@
 		 * card has just reset and the NORXflag is on until
 		 * we tell it to start receiving.
 		 */
-		BUGMSG(D_INIT_REASONS, "intmask was %02Xh\n", inb(_INTMASK));
-		outb(0, _INTMASK);
+		arc_printk(D_INIT_REASONS, dev, "intmask was %02Xh\n",
+			   arcnet_inb(ioaddr, COM20020_REG_R_STATUS));
+		arcnet_outb(0, ioaddr, COM20020_REG_W_INTMASK);
 		airqmask = probe_irq_on();
-		outb(NORXflag, _INTMASK);
+		arcnet_outb(NORXflag, ioaddr, COM20020_REG_W_INTMASK);
 		udelay(1);
-		outb(0, _INTMASK);
+		arcnet_outb(0, ioaddr, COM20020_REG_W_INTMASK);
 		dev->irq = probe_irq_off(airqmask);
 
 		if ((int)dev->irq <= 0) {
-			BUGMSG(D_INIT_REASONS, "Autoprobe IRQ failed first time\n");
+			arc_printk(D_INIT_REASONS, dev, "Autoprobe IRQ failed first time\n");
 			airqmask = probe_irq_on();
-			outb(NORXflag, _INTMASK);
+			arcnet_outb(NORXflag, ioaddr, COM20020_REG_W_INTMASK);
 			udelay(5);
-			outb(0, _INTMASK);
+			arcnet_outb(0, ioaddr, COM20020_REG_W_INTMASK);
 			dev->irq = probe_irq_off(airqmask);
 			if ((int)dev->irq <= 0) {
-				BUGMSG(D_NORMAL, "Autoprobe IRQ failed.\n");
+				arc_printk(D_NORMAL, dev, "Autoprobe IRQ failed.\n");
 				err = -ENODEV;
 				goto out;
 			}
@@ -107,7 +107,9 @@
 	}
 
 	lp->card_name = "ISA COM20020";
-	if ((err = com20020_found(dev, 0)) != 0)
+
+	err = com20020_found(dev, 0);
+	if (err != 0)
 		goto out;
 
 	return 0;
@@ -194,7 +196,7 @@
 
 	switch (ints[0]) {
 	default:		/* ERROR */
-		printk("com90xx: Too many arguments.\n");
+		pr_info("Too many arguments\n");
 	case 6:		/* Timeout */
 		timeout = ints[6];
 	case 5:		/* CKP value */
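
One piece of logic worth calling out from the com20020-isa hunks above is the IRQ autoprobe: a freshly reset card keeps NORXflag asserted until it is told to start receiving (see the comment in the hunk), so briefly unmasking that one interrupt and asking probe_irq_off() which line fired identifies the IRQ. Stripped of the retry and error handling shown above, the sequence in com20020isa_probe() is roughly:

	/* sketch of the autoprobe above -- names taken from the hunks */
	arcnet_outb(0, ioaddr, COM20020_REG_W_INTMASK);	/* mask everything */
	airqmask = probe_irq_on();
	arcnet_outb(NORXflag, ioaddr, COM20020_REG_W_INTMASK);	/* let NORX fire */
	udelay(1);
	arcnet_outb(0, ioaddr, COM20020_REG_W_INTMASK);	/* mask again */
	dev->irq = probe_irq_off(airqmask);		/* which line pulsed? */
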
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 96edc13..a12bf83 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -1,7 +1,7 @@
 /*
  * Linux ARCnet driver - COM20020 PCI support
  * Contemporary Controls PCI20 and SOHARD SH-ARC PCI
- * 
+ *
  * Written 1994-1999 by Avery Pennarun,
  *    based on an ISA version by David Woodhouse.
  * Written 1999-2000 by Martin Mares <mj@ucw.cz>.
@@ -26,6 +26,9 @@
  *
  * **********************
  */
+
+#define pr_fmt(fmt) "arcnet:" KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
@@ -36,14 +39,11 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
-#include <linux/arcdevice.h>
-#include <linux/com20020.h>
 #include <linux/list.h>
+#include <linux/io.h>
 
-#include <asm/io.h>
-
-
-#define VERSION "arcnet: COM20020 PCI support\n"
+#include "arcdevice.h"
+#include "com20020.h"
 
 /* Module parameters */
 
@@ -64,7 +64,8 @@
 
 static void com20020pci_remove(struct pci_dev *pdev);
 
-static int com20020pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int com20020pci_probe(struct pci_dev *pdev,
+			     const struct pci_device_id *id)
 {
 	struct com20020_pci_card_info *ci;
 	struct net_device *dev;
@@ -86,7 +87,6 @@
 
 	INIT_LIST_HEAD(&priv->list_dev);
 
-
 	for (i = 0; i < ci->devcount; i++) {
 		struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i];
 		struct com20020_dev *card;
@@ -101,13 +101,13 @@
 
 		lp = netdev_priv(dev);
 
-		BUGMSG(D_NORMAL, "%s Controls\n", ci->name);
+		arc_printk(D_NORMAL, dev, "%s Controls\n", ci->name);
 		ioaddr = pci_resource_start(pdev, cm->bar) + cm->offset;
 
 		r = devm_request_region(&pdev->dev, ioaddr, cm->size,
 					"com20020-pci");
 		if (!r) {
-			pr_err("IO region %xh-%xh already allocated.\n",
+			pr_err("IO region %xh-%xh already allocated\n",
 			       ioaddr, ioaddr + cm->size - 1);
 			ret = -EBUSY;
 			goto out_port;
@@ -117,8 +117,8 @@
 		 * ARCNET controller needs
 		 * this access to detect bustype
 		 */
-		outb(0x00, ioaddr + 1);
-		inb(ioaddr + 1);
+		arcnet_outb(0x00, ioaddr, COM20020_REG_W_COMMAND);
+		arcnet_inb(ioaddr, COM20020_REG_R_DIAGSTAT);
 
 		dev->base_addr = ioaddr;
 		dev->dev_addr[0] = node;
@@ -131,7 +131,7 @@
 		lp->timeout = timeout;
 		lp->hw.owner = THIS_MODULE;
 
-		if (ASTATUS() == 0xFF) {
+		if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
 			pr_err("IO address %Xh is empty!\n", ioaddr);
 			ret = -EIO;
 			goto out_port;
@@ -143,10 +143,8 @@
 
 		card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev),
 				    GFP_KERNEL);
-		if (!card) {
-			pr_err("%s out of memory!\n", __func__);
+		if (!card)
 			return -ENOMEM;
-		}
 
 		card->index = i;
 		card->pci_priv = priv;
@@ -190,7 +188,11 @@
 	.name = "ARC-PCI",
 	.devcount = 1,
 	.chan_map_tbl = {
-		{ 2, 0x00, 0x08 },
+		{
+			.bar = 2,
+			.offset = 0x00,
+			.size = 0x08,
+		},
 	},
 	.flags = ARC_CAN_10MBIT,
 };
@@ -199,7 +201,11 @@
 	.name = "ARC-PCI",
 	.devcount = 1,
 	.chan_map_tbl = {
-		{ 2, 0x00, 0x08 },
+		{
+			.bar = 2,
+			.offset = 0x00,
+			.size = 0x08,
+		},
 	},
 	.flags = ARC_IS_5MBIT,
 };
@@ -209,7 +215,11 @@
 	.devcount = 1,
 	/* SOHARD needs PCI base addr 4 */
 	.chan_map_tbl = {
-		{4, 0x00, 0x08},
+		{
+			.bar = 4,
+			.offset = 0x00,
+			.size = 0x08
+		},
 	},
 	.flags = ARC_CAN_10MBIT,
 };
@@ -218,7 +228,11 @@
 	.name = "EAE PLX-PCI ARC1",
 	.devcount = 1,
 	.chan_map_tbl = {
-		{ 2, 0x00, 0x08 },
+		{
+			.bar = 2,
+			.offset = 0x00,
+			.size = 0x08,
+		},
 	},
 	.flags = ARC_CAN_10MBIT,
 };
@@ -227,8 +241,15 @@
 	.name = "EAE PLX-PCI MA1",
 	.devcount = 2,
 	.chan_map_tbl = {
-		{ 2, 0x00, 0x08 },
-		{ 2, 0x08, 0x08 }
+		{
+			.bar = 2,
+			.offset = 0x00,
+			.size = 0x08,
+		}, {
+			.bar = 2,
+			.offset = 0x08,
+			.size = 0x08,
+		}
 	},
 	.flags = ARC_CAN_10MBIT,
 };
@@ -404,7 +425,8 @@
 
 static int __init com20020pci_init(void)
 {
-	BUGLVL(D_NORMAL) printk(VERSION);
+	if (BUGLVL(D_NORMAL))
+		pr_info("%s\n", "COM20020 PCI support");
 	return pci_register_driver(&com20020pci_driver);
 }
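
The com20020-pci hunks above (like the ISA ones before them) show the other main pattern in this series: raw inb()/outb() with magic offsets from ioaddr are replaced by the arcnet_inb()/arcnet_outb() helpers taking named COM20020_REG_* register indices, and the positional chan_map_tbl entries become designated initializers. A minimal before/after sketch taken from the bus-type detection in the probe path above:

	/* old: bus-type detection poked the register at offset 1 by hand */
	outb(0x00, ioaddr + 1);
	inb(ioaddr + 1);

	/* new: the same accesses through named register accessors */
	arcnet_outb(0x00, ioaddr, COM20020_REG_W_COMMAND);
	arcnet_inb(ioaddr, COM20020_REG_R_DIAGSTAT);
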
 
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 1a84378..c82f323 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -1,6 +1,6 @@
 /*
  * Linux ARCnet driver - COM20020 chipset support
- * 
+ *
  * Written 1997 by David Woodhouse.
  * Written 1994-1999 by Avery Pennarun.
  * Written 1999 by Martin Mares <mj@ucw.cz>.
@@ -25,6 +25,9 @@
  *
  * **********************
  */
+
+#define pr_fmt(fmt) "arcnet:" KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -34,17 +37,16 @@
 #include <linux/netdevice.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/arcdevice.h>
-#include <linux/com20020.h>
+#include <linux/io.h>
 
-#include <asm/io.h>
+#include "arcdevice.h"
+#include "com20020.h"
 
-#define VERSION "arcnet: COM20020 chipset support (by David Woodhouse et al.)\n"
-
-static char *clockrates[] =
-{"10 Mb/s", "Reserved", "5 Mb/s",
- "2.5 Mb/s", "1.25Mb/s", "625 Kb/s", "312.5 Kb/s",
- "156.25 Kb/s", "Reserved", "Reserved", "Reserved"};
+static const char * const clockrates[] = {
+	"XXXXXXX", "XXXXXXXX", "XXXXXX", "2.5 Mb/s",
+	"1.25Mb/s", "625 Kb/s", "312.5 Kb/s", "156.25 Kb/s",
+	"Reserved", "Reserved", "Reserved"
+};
 
 static void com20020_command(struct net_device *dev, int command);
 static int com20020_status(struct net_device *dev);
@@ -63,35 +65,38 @@
 	int ioaddr = dev->base_addr, ofs = 512 * bufnum + offset;
 
 	/* set up the address register */
-	outb((ofs >> 8) | RDDATAflag | AUTOINCflag, _ADDR_HI);
-	outb(ofs & 0xff, _ADDR_LO);
+	arcnet_outb((ofs >> 8) | RDDATAflag | AUTOINCflag,
+		    ioaddr, COM20020_REG_W_ADDR_HI);
+	arcnet_outb(ofs & 0xff, ioaddr, COM20020_REG_W_ADDR_LO);
 
 	/* copy the data */
-	TIME("insb", count, insb(_MEMDATA, buf, count));
+	TIME(dev, "insb", count,
+	     arcnet_insb(ioaddr, COM20020_REG_RW_MEMDATA, buf, count));
 }
 
-
 static void com20020_copy_to_card(struct net_device *dev, int bufnum,
 				  int offset, void *buf, int count)
 {
 	int ioaddr = dev->base_addr, ofs = 512 * bufnum + offset;
 
 	/* set up the address register */
-	outb((ofs >> 8) | AUTOINCflag, _ADDR_HI);
-	outb(ofs & 0xff, _ADDR_LO);
+	arcnet_outb((ofs >> 8) | AUTOINCflag, ioaddr, COM20020_REG_W_ADDR_HI);
+	arcnet_outb(ofs & 0xff, ioaddr, COM20020_REG_W_ADDR_LO);
 
 	/* copy the data */
-	TIME("outsb", count, outsb(_MEMDATA, buf, count));
+	TIME(dev, "outsb", count,
+	     arcnet_outsb(ioaddr, COM20020_REG_RW_MEMDATA, buf, count));
 }
 
-
 /* Reset the card and check some basic stuff during the detection stage. */
 int com20020_check(struct net_device *dev)
 {
 	int ioaddr = dev->base_addr, status;
 	struct arcnet_local *lp = netdev_priv(dev);
 
-	ARCRESET0;
+	arcnet_outb(XTOcfg(3) | RESETcfg, ioaddr, COM20020_REG_W_CONFIG);
+	udelay(5);
+	arcnet_outb(XTOcfg(3), ioaddr, COM20020_REG_W_CONFIG);
 	mdelay(RESETtime);
 
 	lp->setup = lp->clockm ? 0 : (lp->clockp << 1);
@@ -101,49 +106,51 @@
 	/* Enable P1Mode for backplane mode */
 	lp->setup = lp->setup | P1MODE;
 
-	SET_SUBADR(SUB_SETUP1);
-	outb(lp->setup, _XREG);
+	com20020_set_subaddress(lp, ioaddr, SUB_SETUP1);
+	arcnet_outb(lp->setup, ioaddr, COM20020_REG_W_XREG);
 
-	if (lp->clockm != 0)
-	{
-		SET_SUBADR(SUB_SETUP2);
-		outb(lp->setup2, _XREG);
-	
+	if (lp->clockm != 0) {
+		com20020_set_subaddress(lp, ioaddr, SUB_SETUP2);
+		arcnet_outb(lp->setup2, ioaddr, COM20020_REG_W_XREG);
+
 		/* must now write the magic "restart operation" command */
 		mdelay(1);
-		outb(0x18, _COMMAND);
+		arcnet_outb(STARTIOcmd, ioaddr, COM20020_REG_W_COMMAND);
 	}
 
-	lp->config = 0x21 | (lp->timeout << 3) | (lp->backplane << 2);
+	lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
 	/* set node ID to 0x42 (but transmitter is disabled, so it's okay) */
-	SETCONF;
-	outb(0x42, ioaddr + BUS_ALIGN*7);
+	arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
+	arcnet_outb(0x42, ioaddr, COM20020_REG_W_XREG);
 
-	status = ASTATUS();
+	status = arcnet_inb(ioaddr, COM20020_REG_R_STATUS);
 
 	if ((status & 0x99) != (NORXflag | TXFREEflag | RESETflag)) {
-		BUGMSG(D_NORMAL, "status invalid (%Xh).\n", status);
+		arc_printk(D_NORMAL, dev, "status invalid (%Xh).\n", status);
 		return -ENODEV;
 	}
-	BUGMSG(D_INIT_REASONS, "status after reset: %X\n", status);
+	arc_printk(D_INIT_REASONS, dev, "status after reset: %X\n", status);
 
 	/* Enable TX */
-	outb(0x39, _CONFIG);
-	outb(inb(ioaddr + BUS_ALIGN*8), ioaddr + BUS_ALIGN*7);
+	lp->config |= TXENcfg;
+	arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
+	arcnet_outb(arcnet_inb(ioaddr, 8), ioaddr, COM20020_REG_W_XREG);
 
-	ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
-
-	status = ASTATUS();
-	BUGMSG(D_INIT_REASONS, "status after reset acknowledged: %X\n",
-	       status);
+	arcnet_outb(CFLAGScmd | RESETclear | CONFIGclear,
+		    ioaddr, COM20020_REG_W_COMMAND);
+	status = arcnet_inb(ioaddr, COM20020_REG_R_STATUS);
+	arc_printk(D_INIT_REASONS, dev, "status after reset acknowledged: %X\n",
+		   status);
 
 	/* Read first location of memory */
-	outb(0 | RDDATAflag | AUTOINCflag, _ADDR_HI);
-	outb(0, _ADDR_LO);
+	arcnet_outb(0 | RDDATAflag | AUTOINCflag,
+		    ioaddr, COM20020_REG_W_ADDR_HI);
+	arcnet_outb(0, ioaddr, COM20020_REG_W_ADDR_LO);
 
-	if ((status = inb(_MEMDATA)) != TESTvalue) {
-		BUGMSG(D_NORMAL, "Signature byte not found (%02Xh != D1h).\n",
-		       status);
+	status = arcnet_inb(ioaddr, COM20020_REG_RW_MEMDATA);
+	if (status != TESTvalue) {
+		arc_printk(D_NORMAL, dev, "Signature byte not found (%02Xh != D1h).\n",
+			   status);
 		return -ENODEV;
 	}
 	return 0;
@@ -156,8 +163,8 @@
 	struct sockaddr *hwaddr = addr;
 
 	memcpy(dev->dev_addr, hwaddr->sa_data, 1);
-	SET_SUBADR(SUB_NODE);
-	outb(dev->dev_addr[0], _XREG);
+	com20020_set_subaddress(lp, ioaddr, SUB_NODE);
+	arcnet_outb(dev->dev_addr[0], ioaddr, COM20020_REG_W_XREG);
 
 	return 0;
 }
@@ -192,48 +199,54 @@
 	lp->hw.copy_from_card = com20020_copy_from_card;
 	lp->hw.close = com20020_close;
 
+	/* FIXME: do this some other way! */
 	if (!dev->dev_addr[0])
-		dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8);	/* FIXME: do this some other way! */
+		dev->dev_addr[0] = arcnet_inb(ioaddr, 8);
 
-	SET_SUBADR(SUB_SETUP1);
-	outb(lp->setup, _XREG);
+	com20020_set_subaddress(lp, ioaddr, SUB_SETUP1);
+	arcnet_outb(lp->setup, ioaddr, COM20020_REG_W_XREG);
 
-	if (lp->card_flags & ARC_CAN_10MBIT)
-	{
-		SET_SUBADR(SUB_SETUP2);
-		outb(lp->setup2, _XREG);
-	
+	if (lp->card_flags & ARC_CAN_10MBIT) {
+		com20020_set_subaddress(lp, ioaddr, SUB_SETUP2);
+		arcnet_outb(lp->setup2, ioaddr, COM20020_REG_W_XREG);
+
 		/* must now write the magic "restart operation" command */
 		mdelay(1);
-		outb(0x18, _COMMAND);
+		arcnet_outb(STARTIOcmd, ioaddr, COM20020_REG_W_COMMAND);
 	}
 
-	lp->config = 0x20 | (lp->timeout << 3) | (lp->backplane << 2) | 1;
+	lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
 	/* Default 0x38 + register: Node ID */
-	SETCONF;
-	outb(dev->dev_addr[0], _XREG);
+	arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
+	arcnet_outb(dev->dev_addr[0], ioaddr, COM20020_REG_W_XREG);
 
 	/* reserve the irq */
 	if (request_irq(dev->irq, arcnet_interrupt, shared,
 			"arcnet (COM20020)", dev)) {
-		BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
+		arc_printk(D_NORMAL, dev, "Can't get IRQ %d!\n", dev->irq);
 		return -ENODEV;
 	}
 
 	dev->base_addr = ioaddr;
 
-	BUGMSG(D_NORMAL, "%s: station %02Xh found at %03lXh, IRQ %d.\n",
-	       lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq);
+	arc_printk(D_NORMAL, dev, "%s: station %02Xh found at %03lXh, IRQ %d.\n",
+		   lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq);
 
 	if (lp->backplane)
-		BUGMSG(D_NORMAL, "Using backplane mode.\n");
+		arc_printk(D_NORMAL, dev, "Using backplane mode.\n");
 
 	if (lp->timeout != 3)
-		BUGMSG(D_NORMAL, "Using extended timeout value of %d.\n", lp->timeout);
+		arc_printk(D_NORMAL, dev, "Using extended timeout value of %d\n",
+			   lp->timeout);
 
-	BUGMSG(D_NORMAL, "Using CKP %d - data rate %s.\n",
-	       lp->setup >> 1, 
-	       clockrates[3 - ((lp->setup2 & 0xF0) >> 4) + ((lp->setup & 0x0F) >> 1)]);
+	arc_printk(D_NORMAL, dev, "Using CKP %d - data rate %s\n",
+		   lp->setup >> 1,
+		   clockrates[3 -
+			      ((lp->setup2 & 0xF0) >> 4) +
+			      ((lp->setup & 0x0F) >> 1)]);
+			/* The clockrates array index looks very fragile.
+			 * It seems like it could have negative indexing.
+			 */
 
 	if (register_netdev(dev)) {
 		free_irq(dev->irq, dev);
@@ -242,10 +255,8 @@
 	return 0;
 }
 
-
-/* 
- * Do a hardware reset on the card, and set up necessary registers.
- * 
+/* Do a hardware reset on the card, and set up necessary registers.
+ *
  * This should be called as little as possible, because it disrupts the
  * token on the network (causes a RECON) and requires a significant delay.
  *
@@ -257,65 +268,71 @@
 	u_int ioaddr = dev->base_addr;
 	u_char inbyte;
 
-	BUGMSG(D_DEBUG, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n",
-		__FILE__,__LINE__,__func__,dev,lp,dev->name);
-	BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n",
-	       dev->name, ASTATUS());
+	arc_printk(D_DEBUG, dev, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n",
+		   __FILE__, __LINE__, __func__, dev, lp, dev->name);
+	arc_printk(D_INIT, dev, "Resetting %s (status=%02Xh)\n",
+		   dev->name, arcnet_inb(ioaddr, COM20020_REG_R_STATUS));
 
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
+	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
 	lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2);
 	/* power-up defaults */
-	SETCONF;
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
+	arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
+	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
 
 	if (really_reset) {
 		/* reset the card */
-		ARCRESET;
-		mdelay(RESETtime * 2);	/* COM20020 seems to be slower sometimes */
+		arcnet_outb(lp->config | RESETcfg, ioaddr, COM20020_REG_W_CONFIG);
+		udelay(5);
+		arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
+		mdelay(RESETtime * 2);
+				/* COM20020 seems to be slower sometimes */
 	}
 	/* clear flags & end reset */
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
-	ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
+	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
+	arcnet_outb(CFLAGScmd | RESETclear | CONFIGclear,
+		    ioaddr, COM20020_REG_W_COMMAND);
 
 	/* verify that the ARCnet signature byte is present */
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
+	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
 
 	com20020_copy_from_card(dev, 0, 0, &inbyte, 1);
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
+	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
 	if (inbyte != TESTvalue) {
-		BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
-		BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
+		arc_printk(D_DEBUG, dev, "%s: %d: %s\n",
+			   __FILE__, __LINE__, __func__);
+		arc_printk(D_NORMAL, dev, "reset failed: TESTvalue not present.\n");
 		return 1;
 	}
 	/* enable extended (512-byte) packets */
-	ACOMMAND(CONFIGcmd | EXTconf);
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
+	arcnet_outb(CONFIGcmd | EXTconf, ioaddr, COM20020_REG_W_COMMAND);
+
+	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
 
 	/* done!  return success. */
 	return 0;
 }
 
-
 static void com20020_setmask(struct net_device *dev, int mask)
 {
 	u_int ioaddr = dev->base_addr;
-	BUGMSG(D_DURING, "Setting mask to %x at %x\n",mask,ioaddr);
-	AINTMASK(mask);
-}
 
+	arc_printk(D_DURING, dev, "Setting mask to %x at %x\n", mask, ioaddr);
+	arcnet_outb(mask, ioaddr, COM20020_REG_W_INTMASK);
+}
 
 static void com20020_command(struct net_device *dev, int cmd)
 {
 	u_int ioaddr = dev->base_addr;
-	ACOMMAND(cmd);
-}
 
+	arcnet_outb(cmd, ioaddr, COM20020_REG_W_COMMAND);
+}
 
 static int com20020_status(struct net_device *dev)
 {
 	u_int ioaddr = dev->base_addr;
 
-	return ASTATUS() + (ADIAGSTATUS()<<8);
+	return arcnet_inb(ioaddr, COM20020_REG_R_STATUS) +
+		(arcnet_inb(ioaddr, COM20020_REG_R_DIAGSTAT) << 8);
 }
 
 static void com20020_close(struct net_device *dev)
@@ -325,7 +342,7 @@
 
 	/* disable transmitter */
 	lp->config &= ~TXENcfg;
-	SETCONF;
+	arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
 }
 
 /* Set or clear the multicast filter for this adaptor.
@@ -340,20 +357,20 @@
 	struct arcnet_local *lp = netdev_priv(dev);
 	int ioaddr = dev->base_addr;
 
-	if ((dev->flags & IFF_PROMISC) && (dev->flags & IFF_UP)) {	/* Enable promiscuous mode */
+	if ((dev->flags & IFF_PROMISC) && (dev->flags & IFF_UP)) {
+		/* Enable promiscuous mode */
 		if (!(lp->setup & PROMISCset))
-			BUGMSG(D_NORMAL, "Setting promiscuous flag...\n");
-		SET_SUBADR(SUB_SETUP1);
+			arc_printk(D_NORMAL, dev, "Setting promiscuous flag...\n");
+		com20020_set_subaddress(lp, ioaddr, SUB_SETUP1);
 		lp->setup |= PROMISCset;
-		outb(lp->setup, _XREG);
-	} else
+		arcnet_outb(lp->setup, ioaddr, COM20020_REG_W_XREG);
+	} else {
 		/* Disable promiscuous mode, use normal mode */
-	{
 		if ((lp->setup & PROMISCset))
-			BUGMSG(D_NORMAL, "Resetting promiscuous flag...\n");
-		SET_SUBADR(SUB_SETUP1);
+			arc_printk(D_NORMAL, dev, "Resetting promiscuous flag...\n");
+		com20020_set_subaddress(lp, ioaddr, SUB_SETUP1);
 		lp->setup &= ~PROMISCset;
-		outb(lp->setup, _XREG);
+		arcnet_outb(lp->setup, ioaddr, COM20020_REG_W_XREG);
 	}
 }
 
@@ -371,7 +388,8 @@
 
 static int __init com20020_module_init(void)
 {
-	BUGLVL(D_NORMAL) printk(VERSION);
+	if (BUGLVL(D_NORMAL))
+		pr_info("%s\n", "COM20020 chipset support (by David Woodhouse et al.)");
 	return 0;
 }
 
diff --git a/include/linux/com20020.h b/drivers/net/arcnet/com20020.h
similarity index 61%
rename from include/linux/com20020.h
rename to drivers/net/arcnet/com20020.h
index 8589899..22a460f 100644
--- a/include/linux/com20020.h
+++ b/drivers/net/arcnet/com20020.h
@@ -1,6 +1,6 @@
 /*
  * Linux ARCnet driver - COM20020 chipset support - function declarations
- * 
+ *
  * Written 1997 by David Woodhouse.
  * Written 1994-1999 by Avery Pennarun.
  * Derived from skeleton.c by Donald Becker.
@@ -34,13 +34,6 @@
 /* The number of low I/O ports used by the card. */
 #define ARCNET_TOTAL_SIZE 8
 
-/* various register addresses */
-#ifdef CONFIG_SA1100_CT6001
-#define BUS_ALIGN  2  /* 8 bit device on a 16 bit bus - needs padding */
-#else
-#define BUS_ALIGN  1
-#endif
-
 #define PLX_PCI_MAX_CARDS 2
 
 struct com20020_pci_channel_map {
@@ -71,17 +64,18 @@
 	int index;
 };
 
-#define _INTMASK  (ioaddr+BUS_ALIGN*0)	/* writable */
-#define _STATUS   (ioaddr+BUS_ALIGN*0)	/* readable */
-#define _COMMAND  (ioaddr+BUS_ALIGN*1)	/* standard arcnet commands */
-#define _DIAGSTAT (ioaddr+BUS_ALIGN*1)	/* diagnostic status register */
-#define _ADDR_HI  (ioaddr+BUS_ALIGN*2)	/* control registers for IO-mapped memory */
-#define _ADDR_LO  (ioaddr+BUS_ALIGN*3)
-#define _MEMDATA  (ioaddr+BUS_ALIGN*4)	/* data port for IO-mapped memory */
-#define _SUBADR   (ioaddr+BUS_ALIGN*5)	/* the extended port _XREG refers to */
-#define _CONFIG   (ioaddr+BUS_ALIGN*6)	/* configuration register */
-#define _XREG     (ioaddr+BUS_ALIGN*7)	/* extra registers (indexed by _CONFIG
-  					or _SUBADR) */
+#define COM20020_REG_W_INTMASK	0	/* writable */
+#define COM20020_REG_R_STATUS	0	/* readable */
+#define COM20020_REG_W_COMMAND	1	/* standard arcnet commands */
+#define COM20020_REG_R_DIAGSTAT	1	/* diagnostic status */
+#define COM20020_REG_W_ADDR_HI	2	/* control for IO-mapped memory */
+#define COM20020_REG_W_ADDR_LO	3
+#define COM20020_REG_RW_MEMDATA	4	/* data port for IO-mapped memory */
+#define COM20020_REG_W_SUBADR	5	/* the extended port _XREG refers to */
+#define COM20020_REG_W_CONFIG	6	/* configuration */
+#define COM20020_REG_W_XREG	7	/* extra
+					 * (indexed by _CONFIG or _SUBADR)
+					 */
 
 /* in the ADDR_HI register */
 #define RDDATAflag	0x80	/* next access is a read (not a write) */
@@ -92,6 +86,7 @@
 /* in the CONFIG register */
 #define RESETcfg	0x80	/* put card in reset state */
 #define TXENcfg		0x20	/* enable TX */
+#define XTOcfg(x)	((x) << 3)	/* extended timeout */
 
 /* in SETUP register */
 #define PROMISCset	0x10	/* enable RCV_ALL */
@@ -109,37 +104,15 @@
 #define SUB_BUSCTL	5	/* bus control options */
 #define SUB_DMACOUNT	6	/* DMA count options */
 
-#define SET_SUBADR(x) do { \
-	if ((x) < 4) \
-	{ \
-		lp->config = (lp->config & ~0x03) | (x); \
-		SETCONF; \
-	} \
-	else \
-	{ \
-		outb(x, _SUBADR); \
-	} \
-} while (0)
-
-#undef ARCRESET
-#undef ASTATUS
-#undef ACOMMAND
-#undef AINTMASK
-
-#define ARCRESET { outb(lp->config | 0x80, _CONFIG); \
-		    udelay(5);                        \
-		    outb(lp->config , _CONFIG);       \
-                  }
-#define ARCRESET0 { outb(0x18 | 0x80, _CONFIG);   \
-		    udelay(5);                       \
-		    outb(0x18 , _CONFIG);            \
-                  }
-
-#define ASTATUS()	inb(_STATUS)
-#define ADIAGSTATUS()	inb(_DIAGSTAT)
-#define ACOMMAND(cmd)	outb((cmd),_COMMAND)
-#define AINTMASK(msk)	outb((msk),_INTMASK)
-
-#define SETCONF		outb(lp->config, _CONFIG)
+static inline void com20020_set_subaddress(struct arcnet_local *lp,
+					   int ioaddr, int val)
+{
+	if (val < 4) {
+		lp->config = (lp->config & ~0x03) | val;
+		arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
+	} else {
+		arcnet_outb(val, ioaddr, COM20020_REG_W_SUBADR);
+	}
+}
 
 #endif /* __COM20020_H */
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index 057d958..cf607ff 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -1,6 +1,6 @@
 /*
  * Linux ARCnet driver - COM20020 PCMCIA support
- * 
+ *
  * Written 1994-1999 by Avery Pennarun,
  *    based on an ISA version by David Woodhouse.
  * Derived from ibmtr_cs.c by Steve Kipisz (pcmcia-cs 3.1.4)
@@ -19,18 +19,21 @@
  * Director, National Security Agency.  This software may only be used
  * and distributed according to the terms of the GNU General Public License as
  * modified by SRC, incorporated herein by reference.
- * 
+ *
  * **********************
  * Changes:
  * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 08/08/2000
  * - reorganize kmallocs in com20020_attach, checking all for failure
  *   and releasing the previous allocations if one fails
  * **********************
- * 
+ *
  * For more details, see drivers/net/arcnet.c
  *
  * **********************
  */
+
+#define pr_fmt(fmt) "arcnet:" KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
@@ -39,52 +42,45 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
-#include <linux/arcdevice.h>
-#include <linux/com20020.h>
-
+#include <linux/io.h>
 #include <pcmcia/cistpl.h>
 #include <pcmcia/ds.h>
 
-#include <asm/io.h>
-
-#define VERSION "arcnet: COM20020 PCMCIA support loaded.\n"
-
+#include "arcdevice.h"
+#include "com20020.h"
 
 static void regdump(struct net_device *dev)
 {
 #ifdef DEBUG
-    int ioaddr = dev->base_addr;
-    int count;
-    
-    netdev_dbg(dev, "register dump:\n");
-    for (count = ioaddr; count < ioaddr + 16; count++)
-    {
-	if (!(count % 16))
-	    pr_cont("%04X:", count);
-	pr_cont(" %02X", inb(count));
-    }
-    pr_cont("\n");
-    
-    netdev_dbg(dev, "buffer0 dump:\n");
+	int ioaddr = dev->base_addr;
+	int count;
+
+	netdev_dbg(dev, "register dump:\n");
+	for (count = 0; count < 16; count++) {
+		if (!(count % 16))
+			pr_cont("%04X:", ioaddr + count);
+		pr_cont(" %02X", arcnet_inb(ioaddr, count));
+	}
+	pr_cont("\n");
+
+	netdev_dbg(dev, "buffer0 dump:\n");
 	/* set up the address register */
-        count = 0;
-	outb((count >> 8) | RDDATAflag | AUTOINCflag, _ADDR_HI);
-	outb(count & 0xff, _ADDR_LO);
-    
-    for (count = 0; count < 256+32; count++)
-    {
-	if (!(count % 16))
-	    pr_cont("%04X:", count);
-	
-	/* copy the data */
-	pr_cont(" %02X", inb(_MEMDATA));
-    }
-    pr_cont("\n");
+	count = 0;
+	arcnet_outb((count >> 8) | RDDATAflag | AUTOINCflag,
+		    ioaddr, COM20020_REG_W_ADDR_HI);
+	arcnet_outb(count & 0xff, ioaddr, COM20020_REG_W_ADDR_LO);
+
+	for (count = 0; count < 256 + 32; count++) {
+		if (!(count % 16))
+			pr_cont("%04X:", count);
+
+		/* copy the data */
+		pr_cont(" %02X", arcnet_inb(ioaddr, COM20020_REG_RW_MEMDATA));
+	}
+	pr_cont("\n");
 #endif
 }
 
-
-
 /*====================================================================*/
 
 /* Parameters that can be set with 'insmod' */
@@ -114,169 +110,161 @@
 
 static int com20020_probe(struct pcmcia_device *p_dev)
 {
-    struct com20020_dev *info;
-    struct net_device *dev;
-    struct arcnet_local *lp;
+	struct com20020_dev *info;
+	struct net_device *dev;
+	struct arcnet_local *lp;
 
-    dev_dbg(&p_dev->dev, "com20020_attach()\n");
+	dev_dbg(&p_dev->dev, "com20020_attach()\n");
 
-    /* Create new network device */
-    info = kzalloc(sizeof(*info), GFP_KERNEL);
-    if (!info)
-	goto fail_alloc_info;
+	/* Create new network device */
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		goto fail_alloc_info;
 
-    dev = alloc_arcdev("");
-    if (!dev)
-	goto fail_alloc_dev;
+	dev = alloc_arcdev("");
+	if (!dev)
+		goto fail_alloc_dev;
 
-    lp = netdev_priv(dev);
-    lp->timeout = timeout;
-    lp->backplane = backplane;
-    lp->clockp = clockp;
-    lp->clockm = clockm & 3;
-    lp->hw.owner = THIS_MODULE;
+	lp = netdev_priv(dev);
+	lp->timeout = timeout;
+	lp->backplane = backplane;
+	lp->clockp = clockp;
+	lp->clockm = clockm & 3;
+	lp->hw.owner = THIS_MODULE;
 
-    /* fill in our module parameters as defaults */
-    dev->dev_addr[0] = node;
+	/* fill in our module parameters as defaults */
+	dev->dev_addr[0] = node;
 
-    p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
-    p_dev->resource[0]->end = 16;
-    p_dev->config_flags |= CONF_ENABLE_IRQ;
+	p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+	p_dev->resource[0]->end = 16;
+	p_dev->config_flags |= CONF_ENABLE_IRQ;
 
-    info->dev = dev;
-    p_dev->priv = info;
+	info->dev = dev;
+	p_dev->priv = info;
 
-    return com20020_config(p_dev);
+	return com20020_config(p_dev);
 
 fail_alloc_dev:
-    kfree(info);
+	kfree(info);
 fail_alloc_info:
-    return -ENOMEM;
+	return -ENOMEM;
 } /* com20020_attach */
 
 static void com20020_detach(struct pcmcia_device *link)
 {
-    struct com20020_dev *info = link->priv;
-    struct net_device *dev = info->dev;
+	struct com20020_dev *info = link->priv;
+	struct net_device *dev = info->dev;
 
-    dev_dbg(&link->dev, "detach...\n");
+	dev_dbg(&link->dev, "detach...\n");
 
-    dev_dbg(&link->dev, "com20020_detach\n");
+	dev_dbg(&link->dev, "com20020_detach\n");
 
-    dev_dbg(&link->dev, "unregister...\n");
+	dev_dbg(&link->dev, "unregister...\n");
 
-    unregister_netdev(dev);
+	unregister_netdev(dev);
 
-    /*
-     * this is necessary because we register our IRQ separately
-     * from card services.
-     */
-    if (dev->irq)
-	    free_irq(dev->irq, dev);
+	/* this is necessary because we register our IRQ separately
+	 * from card services.
+	 */
+	if (dev->irq)
+		free_irq(dev->irq, dev);
 
-    com20020_release(link);
+	com20020_release(link);
 
-    /* Unlink device structure, free bits */
-    dev_dbg(&link->dev, "unlinking...\n");
-    if (link->priv)
-    {
-	dev = info->dev;
-	if (dev)
-	{
-	    dev_dbg(&link->dev, "kfree...\n");
-	    free_netdev(dev);
+	/* Unlink device structure, free bits */
+	dev_dbg(&link->dev, "unlinking...\n");
+	if (link->priv) {
+		dev = info->dev;
+		if (dev) {
+			dev_dbg(&link->dev, "kfree...\n");
+			free_netdev(dev);
+		}
+		dev_dbg(&link->dev, "kfree2...\n");
+		kfree(info);
 	}
-	dev_dbg(&link->dev, "kfree2...\n");
-	kfree(info);
-    }
 
 } /* com20020_detach */
 
 static int com20020_config(struct pcmcia_device *link)
 {
-    struct arcnet_local *lp;
-    struct com20020_dev *info;
-    struct net_device *dev;
-    int i, ret;
-    int ioaddr;
+	struct arcnet_local *lp;
+	struct com20020_dev *info;
+	struct net_device *dev;
+	int i, ret;
+	int ioaddr;
 
-    info = link->priv;
-    dev = info->dev;
+	info = link->priv;
+	dev = info->dev;
 
-    dev_dbg(&link->dev, "config...\n");
+	dev_dbg(&link->dev, "config...\n");
 
-    dev_dbg(&link->dev, "com20020_config\n");
+	dev_dbg(&link->dev, "com20020_config\n");
 
-    dev_dbg(&link->dev, "baseport1 is %Xh\n",
-	    (unsigned int) link->resource[0]->start);
+	dev_dbg(&link->dev, "baseport1 is %Xh\n",
+		(unsigned int)link->resource[0]->start);
 
-    i = -ENODEV;
-    link->io_lines = 16;
+	i = -ENODEV;
+	link->io_lines = 16;
 
-    if (!link->resource[0]->start)
-    {
-	for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x10)
-	{
-	    link->resource[0]->start = ioaddr;
-	    i = pcmcia_request_io(link);
-	    if (i == 0)
-		break;
+	if (!link->resource[0]->start) {
+		for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x10) {
+			link->resource[0]->start = ioaddr;
+			i = pcmcia_request_io(link);
+			if (i == 0)
+				break;
+		}
+	} else {
+		i = pcmcia_request_io(link);
 	}
-    }
-    else
-	i = pcmcia_request_io(link);
-    
-    if (i != 0)
-    {
-	dev_dbg(&link->dev, "requestIO failed totally!\n");
-	goto failed;
-    }
-	
-    ioaddr = dev->base_addr = link->resource[0]->start;
-    dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr);
 
-    dev_dbg(&link->dev, "request IRQ %d\n",
-	    link->irq);
-    if (!link->irq)
-    {
-	dev_dbg(&link->dev, "requestIRQ failed totally!\n");
-	goto failed;
-    }
+	if (i != 0) {
+		dev_dbg(&link->dev, "requestIO failed totally!\n");
+		goto failed;
+	}
 
-    dev->irq = link->irq;
+	ioaddr = dev->base_addr = link->resource[0]->start;
+	dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr);
 
-    ret = pcmcia_enable_device(link);
-    if (ret)
-	    goto failed;
+	dev_dbg(&link->dev, "request IRQ %d\n",
+		link->irq);
+	if (!link->irq) {
+		dev_dbg(&link->dev, "requestIRQ failed totally!\n");
+		goto failed;
+	}
 
-    if (com20020_check(dev))
-    {
-	regdump(dev);
-	goto failed;
-    }
-    
-    lp = netdev_priv(dev);
-    lp->card_name = "PCMCIA COM20020";
-    lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */
+	dev->irq = link->irq;
 
-    SET_NETDEV_DEV(dev, &link->dev);
+	ret = pcmcia_enable_device(link);
+	if (ret)
+		goto failed;
 
-    i = com20020_found(dev, 0);	/* calls register_netdev */
-    
-    if (i != 0) {
-	dev_notice(&link->dev,
-		   "com20020_found() failed\n");
-	goto failed;
-    }
+	if (com20020_check(dev)) {
+		regdump(dev);
+		goto failed;
+	}
 
-    netdev_dbg(dev, "port %#3lx, irq %d\n",
-	       dev->base_addr, dev->irq);
-    return 0;
+	lp = netdev_priv(dev);
+	lp->card_name = "PCMCIA COM20020";
+	lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */
+
+	SET_NETDEV_DEV(dev, &link->dev);
+
+	i = com20020_found(dev, 0);	/* calls register_netdev */
+
+	if (i != 0) {
+		dev_notice(&link->dev,
+			   "com20020_found() failed\n");
+		goto failed;
+	}
+
+	netdev_dbg(dev, "port %#3lx, irq %d\n",
+		   dev->base_addr, dev->irq);
+	return 0;
 
 failed:
-    dev_dbg(&link->dev, "com20020_config failed...\n");
-    com20020_release(link);
-    return -ENODEV;
+	dev_dbg(&link->dev, "com20020_config failed...\n");
+	com20020_release(link);
+	return -ENODEV;
 } /* com20020_config */
 
 static void com20020_release(struct pcmcia_device *link)
@@ -304,7 +292,10 @@
 	if (link->open) {
 		int ioaddr = dev->base_addr;
 		struct arcnet_local *lp = netdev_priv(dev);
-		ARCRESET;
+
+		arcnet_outb(lp->config | 0x80, ioaddr, COM20020_REG_W_CONFIG);
+		udelay(5);
+		arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
 	}
 
 	return 0;
@@ -312,9 +303,9 @@
 
 static const struct pcmcia_device_id com20020_ids[] = {
 	PCMCIA_DEVICE_PROD_ID12("Contemporary Control Systems, Inc.",
-			"PCM20 Arcnet Adapter", 0x59991666, 0x95dfffaf),
+				"PCM20 Arcnet Adapter", 0x59991666, 0x95dfffaf),
 	PCMCIA_DEVICE_PROD_ID12("SoHard AG",
-			"SH ARC PCMCIA", 0xf8991729, 0x69dff0c7),
+				"SH ARC PCMCIA", 0xf8991729, 0x69dff0c7),
 	PCMCIA_DEVICE_NULL
 };
 MODULE_DEVICE_TABLE(pcmcia, com20020_ids);
diff --git a/drivers/net/arcnet/com9026.h b/drivers/net/arcnet/com9026.h
new file mode 100644
index 0000000..efcaf67
--- /dev/null
+++ b/drivers/net/arcnet/com9026.h
@@ -0,0 +1,17 @@
+#ifndef __COM9026_H
+#define __COM9026_H
+
+/* COM 9026 controller chip --> ARCnet register addresses */
+
+#define COM9026_REG_W_INTMASK	0	/* writable */
+#define COM9026_REG_R_STATUS	0	/* readable */
+#define COM9026_REG_W_COMMAND	1	/* writable, returns random vals on read (?) */
+#define COM9026_REG_RW_CONFIG	2	/* Configuration register */
+#define COM9026_REG_R_RESET	8	/* software reset (on read) */
+#define COM9026_REG_RW_MEMDATA	12	/* Data port for IO-mapped memory */
+#define COM9026_REG_W_ADDR_LO	14	/* Control registers for said */
+#define COM9026_REG_W_ADDR_HI	15
+
+#define COM9026_REG_R_STATION	1	/* Station ID */
+
+#endif
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 487d780..b57863d 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -1,6 +1,6 @@
 /*
  * Linux ARCnet driver - COM90xx chipset (IO-mapped buffers)
- * 
+ *
  * Written 1997 by David Woodhouse.
  * Written 1994-1999 by Avery Pennarun.
  * Written 1999-2000 by Martin Mares <mj@ucw.cz>.
@@ -25,6 +25,9 @@
  *
  * **********************
  */
+
+#define pr_fmt(fmt) "arcnet:" KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -34,12 +37,10 @@
 #include <linux/bootmem.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <asm/io.h>
-#include <linux/arcdevice.h>
+#include <linux/io.h>
 
-
-#define VERSION "arcnet: COM90xx IO-mapped mode support (by David Woodhouse et el.)\n"
-
+#include "arcdevice.h"
+#include "com9026.h"
 
 /* Internal function declarations */
 
@@ -50,35 +51,14 @@
 static int com90io_reset(struct net_device *dev, int really_reset);
 static void com90io_copy_to_card(struct net_device *dev, int bufnum, int offset,
 				 void *buf, int count);
-static void com90io_copy_from_card(struct net_device *dev, int bufnum, int offset,
-				   void *buf, int count);
-
+static void com90io_copy_from_card(struct net_device *dev, int bufnum,
+				   int offset, void *buf, int count);
 
 /* Handy defines for ARCnet specific stuff */
 
 /* The number of low I/O ports used by the card. */
 #define ARCNET_TOTAL_SIZE 16
 
-/* COM 9026 controller chip --> ARCnet register addresses */
-#define _INTMASK (ioaddr+0)	/* writable */
-#define _STATUS  (ioaddr+0)	/* readable */
-#define _COMMAND (ioaddr+1)	/* writable, returns random vals on read (?) */
-#define _RESET  (ioaddr+8)	/* software reset (on read) */
-#define _MEMDATA  (ioaddr+12)	/* Data port for IO-mapped memory */
-#define _ADDR_HI  (ioaddr+15)	/* Control registers for said */
-#define _ADDR_LO  (ioaddr+14)
-#define _CONFIG  (ioaddr+2)	/* Configuration register */
-
-#undef ASTATUS
-#undef ACOMMAND
-#undef AINTMASK
-
-#define ASTATUS()	inb(_STATUS)
-#define ACOMMAND(cmd) outb((cmd),_COMMAND)
-#define AINTMASK(msk)	outb((msk),_INTMASK)
-#define SETCONF() 	outb((lp->config),_CONFIG)
-
-
 /****************************************************************************
  *                                                                          *
  * IO-mapped operation routines                                             *
@@ -92,58 +72,59 @@
 {
 	int ioaddr = dev->base_addr;
 
-	outb(offset >> 8, _ADDR_HI);
-	outb(offset & 0xff, _ADDR_LO);
+	arcnet_outb(offset >> 8, ioaddr, COM9026_REG_W_ADDR_HI);
+	arcnet_outb(offset & 0xff, ioaddr, COM9026_REG_W_ADDR_LO);
 
-	return inb(_MEMDATA);
+	return arcnet_inb(ioaddr, COM9026_REG_RW_MEMDATA);
 }
 
 #ifdef ONE_AT_A_TIME_TX
-static void put_buffer_byte(struct net_device *dev, unsigned offset, u_char datum)
+static void put_buffer_byte(struct net_device *dev, unsigned offset,
+			    u_char datum)
 {
 	int ioaddr = dev->base_addr;
 
-	outb(offset >> 8, _ADDR_HI);
-	outb(offset & 0xff, _ADDR_LO);
+	arcnet_outb(offset >> 8, ioaddr, COM9026_REG_W_ADDR_HI);
+	arcnet_outb(offset & 0xff, ioaddr, COM9026_REG_W_ADDR_LO);
 
-	outb(datum, _MEMDATA);
+	arcnet_outb(datum, ioaddr, COM9026_REG_RW_MEMDATA);
 }
 
 #endif
 
-
-static void get_whole_buffer(struct net_device *dev, unsigned offset, unsigned length, char *dest)
+static void get_whole_buffer(struct net_device *dev, unsigned offset,
+			     unsigned length, char *dest)
 {
 	int ioaddr = dev->base_addr;
 
-	outb((offset >> 8) | AUTOINCflag, _ADDR_HI);
-	outb(offset & 0xff, _ADDR_LO);
+	arcnet_outb((offset >> 8) | AUTOINCflag, ioaddr, COM9026_REG_W_ADDR_HI);
+	arcnet_outb(offset & 0xff, ioaddr, COM9026_REG_W_ADDR_LO);
 
 	while (length--)
 #ifdef ONE_AT_A_TIME_RX
 		*(dest++) = get_buffer_byte(dev, offset++);
 #else
-		*(dest++) = inb(_MEMDATA);
+		*(dest++) = arcnet_inb(ioaddr, COM9026_REG_RW_MEMDATA);
 #endif
 }
 
-static void put_whole_buffer(struct net_device *dev, unsigned offset, unsigned length, char *dest)
+static void put_whole_buffer(struct net_device *dev, unsigned offset,
+			     unsigned length, char *dest)
 {
 	int ioaddr = dev->base_addr;
 
-	outb((offset >> 8) | AUTOINCflag, _ADDR_HI);
-	outb(offset & 0xff, _ADDR_LO);
+	arcnet_outb((offset >> 8) | AUTOINCflag, ioaddr, COM9026_REG_W_ADDR_HI);
+	arcnet_outb(offset & 0xff, ioaddr, COM9026_REG_W_ADDR_LO);
 
 	while (length--)
 #ifdef ONE_AT_A_TIME_TX
 		put_buffer_byte(dev, offset++, *(dest++));
 #else
-		outb(*(dest++), _MEMDATA);
+		arcnet_outb(*(dest++), ioaddr, COM9026_REG_RW_MEMDATA);
 #endif
 }
 
-/*
- * We cannot probe for an IO mapped card either, although we can check that
+/* We cannot probe for an IO mapped card either, although we can check that
  * it's where we were told it was, and even autoirq
  */
 static int __init com90io_probe(struct net_device *dev)
@@ -151,71 +132,78 @@
 	int ioaddr = dev->base_addr, status;
 	unsigned long airqmask;
 
-	BUGLVL(D_NORMAL) printk(VERSION);
-	BUGLVL(D_NORMAL) printk("E-mail me if you actually test this driver, please!\n");
+	if (BUGLVL(D_NORMAL)) {
+		pr_info("%s\n", "COM90xx IO-mapped mode support (by David Woodhouse et al.)");
+		pr_info("E-mail me if you actually test this driver, please!\n");
+	}
 
 	if (!ioaddr) {
-		BUGMSG(D_NORMAL, "No autoprobe for IO mapped cards; you "
-		       "must specify the base address!\n");
+		arc_printk(D_NORMAL, dev, "No autoprobe for IO mapped cards; you must specify the base address!\n");
 		return -ENODEV;
 	}
 	if (!request_region(ioaddr, ARCNET_TOTAL_SIZE, "com90io probe")) {
-		BUGMSG(D_INIT_REASONS, "IO request_region %x-%x failed.\n",
-		       ioaddr, ioaddr + ARCNET_TOTAL_SIZE - 1);
+		arc_printk(D_INIT_REASONS, dev, "IO request_region %x-%x failed\n",
+			   ioaddr, ioaddr + ARCNET_TOTAL_SIZE - 1);
 		return -ENXIO;
 	}
-	if (ASTATUS() == 0xFF) {
-		BUGMSG(D_INIT_REASONS, "IO address %x empty\n", ioaddr);
+	if (arcnet_inb(ioaddr, COM9026_REG_R_STATUS) == 0xFF) {
+		arc_printk(D_INIT_REASONS, dev, "IO address %x empty\n",
+			   ioaddr);
 		goto err_out;
 	}
-	inb(_RESET);
+	arcnet_inb(ioaddr, COM9026_REG_R_RESET);
 	mdelay(RESETtime);
 
-	status = ASTATUS();
+	status = arcnet_inb(ioaddr, COM9026_REG_R_STATUS);
 
 	if ((status & 0x9D) != (NORXflag | RECONflag | TXFREEflag | RESETflag)) {
-		BUGMSG(D_INIT_REASONS, "Status invalid (%Xh).\n", status);
+		arc_printk(D_INIT_REASONS, dev, "Status invalid (%Xh)\n",
+			   status);
 		goto err_out;
 	}
-	BUGMSG(D_INIT_REASONS, "Status after reset: %X\n", status);
+	arc_printk(D_INIT_REASONS, dev, "Status after reset: %X\n", status);
 
-	ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
+	arcnet_outb(CFLAGScmd | RESETclear | CONFIGclear,
+		    ioaddr, COM9026_REG_W_COMMAND);
 
-	BUGMSG(D_INIT_REASONS, "Status after reset acknowledged: %X\n", status);
+	arc_printk(D_INIT_REASONS, dev, "Status after reset acknowledged: %X\n",
+		   status);
 
-	status = ASTATUS();
+	status = arcnet_inb(ioaddr, COM9026_REG_R_STATUS);
 
 	if (status & RESETflag) {
-		BUGMSG(D_INIT_REASONS, "Eternal reset (status=%Xh)\n", status);
+		arc_printk(D_INIT_REASONS, dev, "Eternal reset (status=%Xh)\n",
+			   status);
 		goto err_out;
 	}
-	outb((0x16 | IOMAPflag) & ~ENABLE16flag, _CONFIG);
+	arcnet_outb((0x16 | IOMAPflag) & ~ENABLE16flag,
+		    ioaddr, COM9026_REG_RW_CONFIG);
 
 	/* Read first loc'n of memory */
 
-	outb(AUTOINCflag, _ADDR_HI);
-	outb(0, _ADDR_LO);
+	arcnet_outb(AUTOINCflag, ioaddr, COM9026_REG_W_ADDR_HI);
+	arcnet_outb(0, ioaddr, COM9026_REG_W_ADDR_LO);
 
-	if ((status = inb(_MEMDATA)) != 0xd1) {
-		BUGMSG(D_INIT_REASONS, "Signature byte not found"
-		       " (%Xh instead).\n", status);
+	status = arcnet_inb(ioaddr, COM9026_REG_RW_MEMDATA);
+	if (status != 0xd1) {
+		arc_printk(D_INIT_REASONS, dev, "Signature byte not found (%Xh instead).\n",
+			   status);
 		goto err_out;
 	}
 	if (!dev->irq) {
-		/*
-		 * if we do this, we're sure to get an IRQ since the
+		/* if we do this, we're sure to get an IRQ since the
 		 * card has just reset and the NORXflag is on until
 		 * we tell it to start receiving.
 		 */
 
 		airqmask = probe_irq_on();
-		outb(NORXflag, _INTMASK);
+		arcnet_outb(NORXflag, ioaddr, COM9026_REG_W_INTMASK);
 		udelay(1);
-		outb(0, _INTMASK);
+		arcnet_outb(0, ioaddr, COM9026_REG_W_INTMASK);
 		dev->irq = probe_irq_off(airqmask);
 
 		if ((int)dev->irq <= 0) {
-			BUGMSG(D_INIT_REASONS, "Autoprobe IRQ failed\n");
+			arc_printk(D_INIT_REASONS, dev, "Autoprobe IRQ failed\n");
 			goto err_out;
 		}
 	}
@@ -227,7 +215,6 @@
 	return -ENODEV;
 }
 
-
 /* Set up the struct net_device associated with this card.  Called after
  * probing succeeds.
  */
@@ -238,12 +225,14 @@
 	int err;
 
 	/* Reserve the irq */
-	if (request_irq(dev->irq, arcnet_interrupt, 0, "arcnet (COM90xx-IO)", dev)) {
-		BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
+	if (request_irq(dev->irq, arcnet_interrupt, 0,
+			"arcnet (COM90xx-IO)", dev)) {
+		arc_printk(D_NORMAL, dev, "Can't get IRQ %d!\n", dev->irq);
 		return -ENODEV;
 	}
 	/* Reserve the I/O region */
-	if (!request_region(dev->base_addr, ARCNET_TOTAL_SIZE, "arcnet (COM90xx-IO)")) {
+	if (!request_region(dev->base_addr, ARCNET_TOTAL_SIZE,
+			    "arcnet (COM90xx-IO)")) {
 		free_irq(dev->irq, dev);
 		return -EBUSY;
 	}
@@ -259,7 +248,7 @@
 	lp->hw.copy_from_card = com90io_copy_from_card;
 
 	lp->config = (0x16 | IOMAPflag) & ~ENABLE16flag;
-	SETCONF();
+	arcnet_outb(lp->config, ioaddr, COM9026_REG_RW_CONFIG);
 
 	/* get and check the station ID from offset 1 in shmem */
 
@@ -267,21 +256,20 @@
 
 	err = register_netdev(dev);
 	if (err) {
-		outb((inb(_CONFIG) & ~IOMAPflag), _CONFIG);
+		arcnet_outb(arcnet_inb(ioaddr, COM9026_REG_RW_CONFIG) & ~IOMAPflag,
+			    ioaddr, COM9026_REG_RW_CONFIG);
 		free_irq(dev->irq, dev);
 		release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
 		return err;
 	}
 
-	BUGMSG(D_NORMAL, "COM90IO: station %02Xh found at %03lXh, IRQ %d.\n",
-	       dev->dev_addr[0], dev->base_addr, dev->irq);
+	arc_printk(D_NORMAL, dev, "COM90IO: station %02Xh found at %03lXh, IRQ %d.\n",
+		   dev->dev_addr[0], dev->base_addr, dev->irq);
 
 	return 0;
 }
 
-
-/*
- * Do a hardware reset on the card, and set up necessary registers.
+/* Do a hardware reset on the card, and set up necessary registers.
  *
  * This should be called as little as possible, because it disrupts the
  * token on the network (causes a RECON) and requires a significant delay.
@@ -293,67 +281,66 @@
 	struct arcnet_local *lp = netdev_priv(dev);
 	short ioaddr = dev->base_addr;
 
-	BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", dev->name, ASTATUS());
+	arc_printk(D_INIT, dev, "Resetting %s (status=%02Xh)\n",
+		   dev->name, arcnet_inb(ioaddr, COM9026_REG_R_STATUS));
 
 	if (really_reset) {
 		/* reset the card */
-		inb(_RESET);
+		arcnet_inb(ioaddr, COM9026_REG_R_RESET);
 		mdelay(RESETtime);
 	}
 	/* Set the thing to IO-mapped, 8-bit  mode */
 	lp->config = (0x1C | IOMAPflag) & ~ENABLE16flag;
-	SETCONF();
+	arcnet_outb(lp->config, ioaddr, COM9026_REG_RW_CONFIG);
 
-	ACOMMAND(CFLAGScmd | RESETclear);	/* clear flags & end reset */
-	ACOMMAND(CFLAGScmd | CONFIGclear);
+	arcnet_outb(CFLAGScmd | RESETclear, ioaddr, COM9026_REG_W_COMMAND);
+					/* clear flags & end reset */
+	arcnet_outb(CFLAGScmd | CONFIGclear, ioaddr, COM9026_REG_W_COMMAND);
 
 	/* verify that the ARCnet signature byte is present */
 	if (get_buffer_byte(dev, 0) != TESTvalue) {
-		BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
+		arc_printk(D_NORMAL, dev, "reset failed: TESTvalue not present.\n");
 		return 1;
 	}
 	/* enable extended (512-byte) packets */
-	ACOMMAND(CONFIGcmd | EXTconf);
-
+	arcnet_outb(CONFIGcmd | EXTconf, ioaddr, COM9026_REG_W_COMMAND);
 	/* done!  return success. */
 	return 0;
 }
 
-
 static void com90io_command(struct net_device *dev, int cmd)
 {
 	short ioaddr = dev->base_addr;
 
-	ACOMMAND(cmd);
+	arcnet_outb(cmd, ioaddr, COM9026_REG_W_COMMAND);
 }
 
-
 static int com90io_status(struct net_device *dev)
 {
 	short ioaddr = dev->base_addr;
 
-	return ASTATUS();
+	return arcnet_inb(ioaddr, COM9026_REG_R_STATUS);
 }
 
-
 static void com90io_setmask(struct net_device *dev, int mask)
 {
 	short ioaddr = dev->base_addr;
 
-	AINTMASK(mask);
+	arcnet_outb(mask, ioaddr, COM9026_REG_W_INTMASK);
 }
 
-static void com90io_copy_to_card(struct net_device *dev, int bufnum, int offset,
-				 void *buf, int count)
+static void com90io_copy_to_card(struct net_device *dev, int bufnum,
+				 int offset, void *buf, int count)
 {
-	TIME("put_whole_buffer", count, put_whole_buffer(dev, bufnum * 512 + offset, count, buf));
+	TIME(dev, "put_whole_buffer", count,
+	     put_whole_buffer(dev, bufnum * 512 + offset, count, buf));
 }
 
-
-static void com90io_copy_from_card(struct net_device *dev, int bufnum, int offset,
-				   void *buf, int count)
+static void com90io_copy_from_card(struct net_device *dev, int bufnum,
+				   int offset, void *buf, int count)
 {
-	TIME("get_whole_buffer", count, get_whole_buffer(dev, bufnum * 512 + offset, count, buf));
+	TIME(dev, "get_whole_buffer", count,
+	     get_whole_buffer(dev, bufnum * 512 + offset, count, buf));
 }
 
 static int io;			/* use the insmod io= irq= shmem= options */
@@ -369,12 +356,13 @@
 static int __init com90io_setup(char *s)
 {
 	int ints[4];
+
 	s = get_options(s, 4, ints);
 	if (!ints[0])
 		return 0;
 	switch (ints[0]) {
 	default:		/* ERROR */
-		printk("com90io: Too many arguments.\n");
+		pr_err("Too many arguments\n");
 	case 2:		/* IRQ */
 		irq = ints[2];
 	case 1:		/* IO address */
@@ -421,8 +409,11 @@
 
 	unregister_netdev(dev);
 
-	/* Set the thing back to MMAP mode, in case the old driver is loaded later */
-	outb((inb(_CONFIG) & ~IOMAPflag), _CONFIG);
+	/* In case the old driver is loaded later,
+	 * set the thing back to MMAP mode
+	 */
+	arcnet_outb(arcnet_inb(ioaddr, COM9026_REG_RW_CONFIG) & ~IOMAPflag,
+		    ioaddr, COM9026_REG_RW_CONFIG);
 
 	free_irq(dev->irq, dev);
 	release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index b80fbe4..0d9b45f 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -1,6 +1,6 @@
 /*
  * Linux ARCnet driver - COM90xx chipset (memory-mapped buffers)
- * 
+ *
  * Written 1994-1999 by Avery Pennarun.
  * Written 1999 by Martin Mares <mj@ucw.cz>.
  * Derived from skeleton.c by Donald Becker.
@@ -24,6 +24,9 @@
  *
  * **********************
  */
+
+#define pr_fmt(fmt) "arcnet:" KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
@@ -32,12 +35,10 @@
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/slab.h>
-#include <asm/io.h>
-#include <linux/arcdevice.h>
+#include <linux/io.h>
 
-
-#define VERSION "arcnet: COM90xx chipset support\n"
-
+#include "arcdevice.h"
+#include "com9026.h"
 
 /* Define this to speed up the autoprobe by assuming if only one io port and
  * shmem are left in the list at Stage 5, they must correspond to each
@@ -53,7 +54,6 @@
  */
 #undef FAST_PROBE
 
-
 /* Internal function declarations */
 static int com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem *);
 static void com90xx_command(struct net_device *dev, int command);
@@ -62,8 +62,8 @@
 static int com90xx_reset(struct net_device *dev, int really_reset);
 static void com90xx_copy_to_card(struct net_device *dev, int bufnum, int offset,
 				 void *buf, int count);
-static void com90xx_copy_from_card(struct net_device *dev, int bufnum, int offset,
-				   void *buf, int count);
+static void com90xx_copy_from_card(struct net_device *dev, int bufnum,
+				   int offset, void *buf, int count);
 
 /* Known ARCnet cards */
 
@@ -77,26 +77,7 @@
 
 /* Amount of I/O memory used by the card */
 #define BUFFER_SIZE (512)
-#define MIRROR_SIZE (BUFFER_SIZE*4)
-
-/* COM 9026 controller chip --> ARCnet register addresses */
-#define _INTMASK (ioaddr+0)	/* writable */
-#define _STATUS  (ioaddr+0)	/* readable */
-#define _COMMAND (ioaddr+1)	/* writable, returns random vals on read (?) */
-#define _CONFIG  (ioaddr+2)	/* Configuration register */
-#define _RESET   (ioaddr+8)	/* software reset (on read) */
-#define _MEMDATA (ioaddr+12)	/* Data port for IO-mapped memory */
-#define _ADDR_HI (ioaddr+15)	/* Control registers for said */
-#define _ADDR_LO (ioaddr+14)
-
-#undef ASTATUS
-#undef ACOMMAND
-#undef AINTMASK
-
-#define ASTATUS()	inb(_STATUS)
-#define ACOMMAND(cmd) 	outb((cmd),_COMMAND)
-#define AINTMASK(msk)	outb((msk),_INTMASK)
-
+#define MIRROR_SIZE (BUFFER_SIZE * 4)
 
 static int com90xx_skip_probe __initdata = 0;
 
@@ -116,8 +97,7 @@
 {
 	int count, status, ioaddr, numprint, airq, openparen = 0;
 	unsigned long airqmask;
-	int ports[(0x3f0 - 0x200) / 16 + 1] =
-	{0};
+	int ports[(0x3f0 - 0x200) / 16 + 1] = { 0 };
 	unsigned long *shmems;
 	void __iomem **iomem;
 	int numports, numshmems, *port;
@@ -127,18 +107,19 @@
 	if (!io && !irq && !shmem && !*device && com90xx_skip_probe)
 		return;
 
-	shmems = kzalloc(((0x100000-0xa0000) / 0x800) * sizeof(unsigned long),
+	shmems = kzalloc(((0x100000 - 0xa0000) / 0x800) * sizeof(unsigned long),
 			 GFP_KERNEL);
 	if (!shmems)
 		return;
-	iomem = kzalloc(((0x100000-0xa0000) / 0x800) * sizeof(void __iomem *),
-			 GFP_KERNEL);
+	iomem = kzalloc(((0x100000 - 0xa0000) / 0x800) * sizeof(void __iomem *),
+			GFP_KERNEL);
 	if (!iomem) {
 		kfree(shmems);
 		return;
 	}
 
-	BUGLVL(D_NORMAL) printk(VERSION);
+	if (BUGLVL(D_NORMAL))
+		pr_info("%s\n", "COM90xx chipset support");
 
 	/* set up the arrays where we'll store the possible probe addresses */
 	numports = numshmems = 0;
@@ -161,38 +142,43 @@
 		numprint++;
 		numprint %= 8;
 		if (!numprint) {
-			BUGMSG2(D_INIT, "\n");
-			BUGMSG2(D_INIT, "S1: ");
+			arc_cont(D_INIT, "\n");
+			arc_cont(D_INIT, "S1: ");
 		}
-		BUGMSG2(D_INIT, "%Xh ", *port);
+		arc_cont(D_INIT, "%Xh ", *port);
 
 		ioaddr = *port;
 
-		if (!request_region(*port, ARCNET_TOTAL_SIZE, "arcnet (90xx)")) {
-			BUGMSG2(D_INIT_REASONS, "(request_region)\n");
-			BUGMSG2(D_INIT_REASONS, "S1: ");
-			BUGLVL(D_INIT_REASONS) numprint = 0;
+		if (!request_region(*port, ARCNET_TOTAL_SIZE,
+				    "arcnet (90xx)")) {
+			arc_cont(D_INIT_REASONS, "(request_region)\n");
+			arc_cont(D_INIT_REASONS, "S1: ");
+			if (BUGLVL(D_INIT_REASONS))
+				numprint = 0;
 			*port-- = ports[--numports];
 			continue;
 		}
-		if (ASTATUS() == 0xFF) {
-			BUGMSG2(D_INIT_REASONS, "(empty)\n");
-			BUGMSG2(D_INIT_REASONS, "S1: ");
-			BUGLVL(D_INIT_REASONS) numprint = 0;
+		if (arcnet_inb(ioaddr, COM9026_REG_R_STATUS) == 0xFF) {
+			arc_cont(D_INIT_REASONS, "(empty)\n");
+			arc_cont(D_INIT_REASONS, "S1: ");
+			if (BUGLVL(D_INIT_REASONS))
+				numprint = 0;
 			release_region(*port, ARCNET_TOTAL_SIZE);
 			*port-- = ports[--numports];
 			continue;
 		}
-		inb(_RESET);	/* begin resetting card */
+		/* begin resetting card */
+		arcnet_inb(ioaddr, COM9026_REG_R_RESET);
 
-		BUGMSG2(D_INIT_REASONS, "\n");
-		BUGMSG2(D_INIT_REASONS, "S1: ");
-		BUGLVL(D_INIT_REASONS) numprint = 0;
+		arc_cont(D_INIT_REASONS, "\n");
+		arc_cont(D_INIT_REASONS, "S1: ");
+		if (BUGLVL(D_INIT_REASONS))
+			numprint = 0;
 	}
-	BUGMSG2(D_INIT, "\n");
+	arc_cont(D_INIT, "\n");
 
 	if (!numports) {
-		BUGMSG2(D_NORMAL, "S1: No ARCnet cards found.\n");
+		arc_cont(D_NORMAL, "S1: No ARCnet cards found.\n");
 		kfree(shmems);
 		kfree(iomem);
 		return;
@@ -206,12 +192,12 @@
 		numprint++;
 		numprint %= 8;
 		if (!numprint) {
-			BUGMSG2(D_INIT, "\n");
-			BUGMSG2(D_INIT, "S2: ");
+			arc_cont(D_INIT, "\n");
+			arc_cont(D_INIT, "S2: ");
 		}
-		BUGMSG2(D_INIT, "%Xh ", *port);
+		arc_cont(D_INIT, "%Xh ", *port);
 	}
-	BUGMSG2(D_INIT, "\n");
+	arc_cont(D_INIT, "\n");
 	mdelay(RESETtime);
 
 	/* Stage 3: abandon any shmem addresses that don't have the signature
@@ -224,29 +210,33 @@
 		numprint++;
 		numprint %= 8;
 		if (!numprint) {
-			BUGMSG2(D_INIT, "\n");
-			BUGMSG2(D_INIT, "S3: ");
+			arc_cont(D_INIT, "\n");
+			arc_cont(D_INIT, "S3: ");
 		}
-		BUGMSG2(D_INIT, "%lXh ", *p);
+		arc_cont(D_INIT, "%lXh ", *p);
 
 		if (!request_mem_region(*p, MIRROR_SIZE, "arcnet (90xx)")) {
-			BUGMSG2(D_INIT_REASONS, "(request_mem_region)\n");
-			BUGMSG2(D_INIT_REASONS, "Stage 3: ");
-			BUGLVL(D_INIT_REASONS) numprint = 0;
+			arc_cont(D_INIT_REASONS, "(request_mem_region)\n");
+			arc_cont(D_INIT_REASONS, "Stage 3: ");
+			if (BUGLVL(D_INIT_REASONS))
+				numprint = 0;
 			goto out;
 		}
 		base = ioremap(*p, MIRROR_SIZE);
 		if (!base) {
-			BUGMSG2(D_INIT_REASONS, "(ioremap)\n");
-			BUGMSG2(D_INIT_REASONS, "Stage 3: ");
-			BUGLVL(D_INIT_REASONS) numprint = 0;
+			arc_cont(D_INIT_REASONS, "(ioremap)\n");
+			arc_cont(D_INIT_REASONS, "Stage 3: ");
+			if (BUGLVL(D_INIT_REASONS))
+				numprint = 0;
 			goto out1;
 		}
-		if (readb(base) != TESTvalue) {
-			BUGMSG2(D_INIT_REASONS, "(%02Xh != %02Xh)\n",
-				readb(base), TESTvalue);
-			BUGMSG2(D_INIT_REASONS, "S3: ");
-			BUGLVL(D_INIT_REASONS) numprint = 0;
+		if (arcnet_readb(base, COM9026_REG_R_STATUS) != TESTvalue) {
+			arc_cont(D_INIT_REASONS, "(%02Xh != %02Xh)\n",
+				 arcnet_readb(base, COM9026_REG_R_STATUS),
+				 TESTvalue);
+			arc_cont(D_INIT_REASONS, "S3: ");
+			if (BUGLVL(D_INIT_REASONS))
+				numprint = 0;
 			goto out2;
 		}
 		/* By writing 0x42 to the TESTvalue location, we also make
@@ -254,15 +244,16 @@
 		 * in another pass through this loop, they will be discarded
 		 * because *cptr != TESTvalue.
 		 */
-		writeb(0x42, base);
-		if (readb(base) != 0x42) {
-			BUGMSG2(D_INIT_REASONS, "(read only)\n");
-			BUGMSG2(D_INIT_REASONS, "S3: ");
+		arcnet_writeb(0x42, base, COM9026_REG_W_INTMASK);
+		if (arcnet_readb(base, COM9026_REG_R_STATUS) != 0x42) {
+			arc_cont(D_INIT_REASONS, "(read only)\n");
+			arc_cont(D_INIT_REASONS, "S3: ");
 			goto out2;
 		}
-		BUGMSG2(D_INIT_REASONS, "\n");
-		BUGMSG2(D_INIT_REASONS, "S3: ");
-		BUGLVL(D_INIT_REASONS) numprint = 0;
+		arc_cont(D_INIT_REASONS, "\n");
+		arc_cont(D_INIT_REASONS, "S3: ");
+		if (BUGLVL(D_INIT_REASONS))
+			numprint = 0;
 		iomem[index] = base;
 		continue;
 	out2:
@@ -273,10 +264,10 @@
 		*p-- = shmems[--numshmems];
 		index--;
 	}
-	BUGMSG2(D_INIT, "\n");
+	arc_cont(D_INIT, "\n");
 
 	if (!numshmems) {
-		BUGMSG2(D_NORMAL, "S3: No ARCnet cards found.\n");
+		arc_cont(D_NORMAL, "S3: No ARCnet cards found.\n");
 		for (port = &ports[0]; port < ports + numports; port++)
 			release_region(*port, ARCNET_TOTAL_SIZE);
 		kfree(shmems);
@@ -291,12 +282,12 @@
 		numprint++;
 		numprint %= 8;
 		if (!numprint) {
-			BUGMSG2(D_INIT, "\n");
-			BUGMSG2(D_INIT, "S4: ");
+			arc_cont(D_INIT, "\n");
+			arc_cont(D_INIT, "S4: ");
 		}
-		BUGMSG2(D_INIT, "%lXh ", *p);
+		arc_cont(D_INIT, "%lXh ", *p);
 	}
-	BUGMSG2(D_INIT, "\n");
+	arc_cont(D_INIT, "\n");
 
 	/* Stage 5: for any ports that have the correct status, can disable
 	 * the RESET flag, and (if no irq is given) generate an autoirq,
@@ -308,33 +299,37 @@
 	numprint = -1;
 	for (port = &ports[0]; port < ports + numports; port++) {
 		int found = 0;
+
 		numprint++;
 		numprint %= 8;
 		if (!numprint) {
-			BUGMSG2(D_INIT, "\n");
-			BUGMSG2(D_INIT, "S5: ");
+			arc_cont(D_INIT, "\n");
+			arc_cont(D_INIT, "S5: ");
 		}
-		BUGMSG2(D_INIT, "%Xh ", *port);
+		arc_cont(D_INIT, "%Xh ", *port);
 
 		ioaddr = *port;
-		status = ASTATUS();
+		status = arcnet_inb(ioaddr, COM9026_REG_R_STATUS);
 
 		if ((status & 0x9D)
 		    != (NORXflag | RECONflag | TXFREEflag | RESETflag)) {
-			BUGMSG2(D_INIT_REASONS, "(status=%Xh)\n", status);
-			BUGMSG2(D_INIT_REASONS, "S5: ");
-			BUGLVL(D_INIT_REASONS) numprint = 0;
+			arc_cont(D_INIT_REASONS, "(status=%Xh)\n", status);
+			arc_cont(D_INIT_REASONS, "S5: ");
+			if (BUGLVL(D_INIT_REASONS))
+				numprint = 0;
 			release_region(*port, ARCNET_TOTAL_SIZE);
 			*port-- = ports[--numports];
 			continue;
 		}
-		ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
-		status = ASTATUS();
+		arcnet_outb(CFLAGScmd | RESETclear | CONFIGclear,
+			    ioaddr, COM9026_REG_W_COMMAND);
+		status = arcnet_inb(ioaddr, COM9026_REG_R_STATUS);
 		if (status & RESETflag) {
-			BUGMSG2(D_INIT_REASONS, " (eternal reset, status=%Xh)\n",
-				status);
-			BUGMSG2(D_INIT_REASONS, "S5: ");
-			BUGLVL(D_INIT_REASONS) numprint = 0;
+			arc_cont(D_INIT_REASONS, " (eternal reset, status=%Xh)\n",
+				 status);
+			arc_cont(D_INIT_REASONS, "S5: ");
+			if (BUGLVL(D_INIT_REASONS))
+				numprint = 0;
 			release_region(*port, ARCNET_TOTAL_SIZE);
 			*port-- = ports[--numports];
 			continue;
@@ -348,15 +343,16 @@
 			 * we tell it to start receiving.
 			 */
 			airqmask = probe_irq_on();
-			AINTMASK(NORXflag);
+			arcnet_outb(NORXflag, ioaddr, COM9026_REG_W_INTMASK);
 			udelay(1);
-			AINTMASK(0);
+			arcnet_outb(0, ioaddr, COM9026_REG_W_INTMASK);
 			airq = probe_irq_off(airqmask);
 
 			if (airq <= 0) {
-				BUGMSG2(D_INIT_REASONS, "(airq=%d)\n", airq);
-				BUGMSG2(D_INIT_REASONS, "S5: ");
-				BUGLVL(D_INIT_REASONS) numprint = 0;
+				arc_cont(D_INIT_REASONS, "(airq=%d)\n", airq);
+				arc_cont(D_INIT_REASONS, "S5: ");
+				if (BUGLVL(D_INIT_REASONS))
+					numprint = 0;
 				release_region(*port, ARCNET_TOTAL_SIZE);
 				*port-- = ports[--numports];
 				continue;
@@ -365,7 +361,7 @@
 			airq = irq;
 		}
 
-		BUGMSG2(D_INIT, "(%d,", airq);
+		arc_cont(D_INIT, "(%d,", airq);
 		openparen = 1;
 
 		/* Everything seems okay.  But which shmem, if any, puts
@@ -376,14 +372,15 @@
 		 */
 #ifdef FAST_PROBE
 		if (numports > 1 || numshmems > 1) {
-			inb(_RESET);
+			arcnet_inb(ioaddr, COM9026_REG_R_RESET);
 			mdelay(RESETtime);
 		} else {
 			/* just one shmem and port, assume they match */
-			writeb(TESTvalue, iomem[0]);
+			arcnet_writeb(TESTvalue, iomem[0],
+				      COM9026_REG_W_INTMASK);
 		}
 #else
-		inb(_RESET);
+		arcnet_inb(ioaddr, COM9026_REG_R_RESET);
 		mdelay(RESETtime);
 #endif
 
@@ -391,8 +388,8 @@
 			u_long ptr = shmems[index];
 			void __iomem *base = iomem[index];
 
-			if (readb(base) == TESTvalue) {	/* found one */
-				BUGMSG2(D_INIT, "%lXh)\n", *p);
+			if (arcnet_readb(base, COM9026_REG_R_STATUS) == TESTvalue) {	/* found one */
+				arc_cont(D_INIT, "%lXh)\n", *p);
 				openparen = 0;
 
 				/* register the card */
@@ -405,25 +402,30 @@
 				iomem[index] = iomem[numshmems];
 				break;	/* go to the next I/O port */
 			} else {
-				BUGMSG2(D_INIT_REASONS, "%Xh-", readb(base));
+				arc_cont(D_INIT_REASONS, "%Xh-",
+					 arcnet_readb(base, COM9026_REG_R_STATUS));
 			}
 		}
 
 		if (openparen) {
-			BUGLVL(D_INIT) printk("no matching shmem)\n");
-			BUGLVL(D_INIT_REASONS) printk("S5: ");
-			BUGLVL(D_INIT_REASONS) numprint = 0;
+			if (BUGLVL(D_INIT))
+				pr_cont("no matching shmem)\n");
+			if (BUGLVL(D_INIT_REASONS)) {
+				pr_cont("S5: ");
+				numprint = 0;
+			}
 		}
 		if (!found)
 			release_region(*port, ARCNET_TOTAL_SIZE);
 		*port-- = ports[--numports];
 	}
 
-	BUGLVL(D_INIT_REASONS) printk("\n");
+	if (BUGLVL(D_INIT_REASONS))
+		pr_cont("\n");
 
 	/* Now put back TESTvalue on all leftover shmems. */
 	for (index = 0; index < numshmems; index++) {
-		writeb(TESTvalue, iomem[index]);
+		arcnet_writeb(TESTvalue, iomem[index], COM9026_REG_W_INTMASK);
 		iounmap(iomem[index]);
 		release_mem_region(shmems[index], MIRROR_SIZE);
 	}
@@ -441,7 +443,7 @@
 
 	p = ioremap(addr, size);
 	if (p) {
-		if (readb(p) == TESTvalue)
+		if (arcnet_readb(p, COM9026_REG_R_STATUS) == TESTvalue)
 			res = 1;
 		else
 			res = 0;
@@ -455,7 +457,8 @@
 /* Set up the struct net_device associated with this card.  Called after
  * probing succeeds.
  */
-static int __init com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem *p)
+static int __init com90xx_found(int ioaddr, int airq, u_long shmem,
+				void __iomem *p)
 {
 	struct net_device *dev = NULL;
 	struct arcnet_local *lp;
@@ -465,7 +468,7 @@
 	/* allocate struct net_device */
 	dev = alloc_arcdev(device);
 	if (!dev) {
-		BUGMSG2(D_NORMAL, "com90xx: Can't allocate device!\n");
+		arc_cont(D_NORMAL, "com90xx: Can't allocate device!\n");
 		iounmap(p);
 		release_mem_region(shmem, MIRROR_SIZE);
 		return -ENOMEM;
@@ -478,7 +481,7 @@
 	 * 2k (or there are no mirrors at all) but on some, it's 4k.
 	 */
 	mirror_size = MIRROR_SIZE;
-	if (readb(p) == TESTvalue &&
+	if (arcnet_readb(p, COM9026_REG_R_STATUS) == TESTvalue &&
 	    check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 &&
 	    check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1)
 		mirror_size = 2 * MIRROR_SIZE;
@@ -499,12 +502,14 @@
 	iounmap(p);
 	release_mem_region(shmem, MIRROR_SIZE);
 
-	if (!request_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1, "arcnet (90xx)"))
+	if (!request_mem_region(dev->mem_start,
+				dev->mem_end - dev->mem_start + 1,
+				"arcnet (90xx)"))
 		goto err_free_dev;
 
 	/* reserve the irq */
 	if (request_irq(airq, arcnet_interrupt, 0, "arcnet (90xx)", dev)) {
-		BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", airq);
+		arc_printk(D_NORMAL, dev, "Can't get IRQ %d!\n", airq);
 		goto err_release_mem;
 	}
 	dev->irq = airq;
@@ -518,22 +523,23 @@
 	lp->hw.owner = THIS_MODULE;
 	lp->hw.copy_to_card = com90xx_copy_to_card;
 	lp->hw.copy_from_card = com90xx_copy_from_card;
-	lp->mem_start = ioremap(dev->mem_start, dev->mem_end - dev->mem_start + 1);
+	lp->mem_start = ioremap(dev->mem_start,
+				dev->mem_end - dev->mem_start + 1);
 	if (!lp->mem_start) {
-		BUGMSG(D_NORMAL, "Can't remap device memory!\n");
+		arc_printk(D_NORMAL, dev, "Can't remap device memory!\n");
 		goto err_free_irq;
 	}
 
 	/* get and check the station ID from offset 1 in shmem */
-	dev->dev_addr[0] = readb(lp->mem_start + 1);
+	dev->dev_addr[0] = arcnet_readb(lp->mem_start, COM9026_REG_R_STATION);
 
 	dev->base_addr = ioaddr;
 
-	BUGMSG(D_NORMAL, "COM90xx station %02Xh found at %03lXh, IRQ %d, "
-	       "ShMem %lXh (%ld*%xh).\n",
-	       dev->dev_addr[0],
-	       dev->base_addr, dev->irq, dev->mem_start,
-	 (dev->mem_end - dev->mem_start + 1) / mirror_size, mirror_size);
+	arc_printk(D_NORMAL, dev, "COM90xx station %02Xh found at %03lXh, IRQ %d, ShMem %lXh (%ld*%xh).\n",
+		   dev->dev_addr[0],
+		   dev->base_addr, dev->irq, dev->mem_start,
+		   (dev->mem_end - dev->mem_start + 1) / mirror_size,
+		   mirror_size);
 
 	if (register_netdev(dev))
 		goto err_unmap;
@@ -552,34 +558,29 @@
 	return -EIO;
 }
 
-
 static void com90xx_command(struct net_device *dev, int cmd)
 {
 	short ioaddr = dev->base_addr;
 
-	ACOMMAND(cmd);
+	arcnet_outb(cmd, ioaddr, COM9026_REG_W_COMMAND);
 }
 
-
 static int com90xx_status(struct net_device *dev)
 {
 	short ioaddr = dev->base_addr;
 
-	return ASTATUS();
+	return arcnet_inb(ioaddr, COM9026_REG_R_STATUS);
 }
 
-
 static void com90xx_setmask(struct net_device *dev, int mask)
 {
 	short ioaddr = dev->base_addr;
 
-	AINTMASK(mask);
+	arcnet_outb(mask, ioaddr, COM9026_REG_W_INTMASK);
 }
 
-
-/*
- * Do a hardware reset on the card, and set up necessary registers.
- * 
+/* Do a hardware reset on the card, and set up necessary registers.
+ *
  * This should be called as little as possible, because it disrupts the
  * token on the network (causes a RECON) and requires a significant delay.
  *
@@ -590,53 +591,58 @@
 	struct arcnet_local *lp = netdev_priv(dev);
 	short ioaddr = dev->base_addr;
 
-	BUGMSG(D_INIT, "Resetting (status=%02Xh)\n", ASTATUS());
+	arc_printk(D_INIT, dev, "Resetting (status=%02Xh)\n",
+		   arcnet_inb(ioaddr, COM9026_REG_R_STATUS));
 
 	if (really_reset) {
 		/* reset the card */
-		inb(_RESET);
+		arcnet_inb(ioaddr, COM9026_REG_R_RESET);
 		mdelay(RESETtime);
 	}
-	ACOMMAND(CFLAGScmd | RESETclear);	/* clear flags & end reset */
-	ACOMMAND(CFLAGScmd | CONFIGclear);
+	/* clear flags & end reset */
+	arcnet_outb(CFLAGScmd | RESETclear, ioaddr, COM9026_REG_W_COMMAND);
+	arcnet_outb(CFLAGScmd | CONFIGclear, ioaddr, COM9026_REG_W_COMMAND);
 
+#if 0
 	/* don't do this until we verify that it doesn't hurt older cards! */
-	/* outb(inb(_CONFIG) | ENABLE16flag, _CONFIG); */
+	arcnet_outb(arcnet_inb(ioaddr, COM9026_REG_RW_CONFIG) | ENABLE16flag,
+		    ioaddr, COM9026_REG_RW_CONFIG);
+#endif
 
 	/* verify that the ARCnet signature byte is present */
-	if (readb(lp->mem_start) != TESTvalue) {
+	if (arcnet_readb(lp->mem_start, COM9026_REG_R_STATUS) != TESTvalue) {
 		if (really_reset)
-			BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
+			arc_printk(D_NORMAL, dev, "reset failed: TESTvalue not present.\n");
 		return 1;
 	}
 	/* enable extended (512-byte) packets */
-	ACOMMAND(CONFIGcmd | EXTconf);
+	arcnet_outb(CONFIGcmd | EXTconf, ioaddr, COM9026_REG_W_COMMAND);
 
 	/* clean out all the memory to make debugging make more sense :) */
-	BUGLVL(D_DURING)
-	    memset_io(lp->mem_start, 0x42, 2048);
+	if (BUGLVL(D_DURING))
+		memset_io(lp->mem_start, 0x42, 2048);
 
 	/* done!  return success. */
 	return 0;
 }
 
-static void com90xx_copy_to_card(struct net_device *dev, int bufnum, int offset,
-				 void *buf, int count)
+static void com90xx_copy_to_card(struct net_device *dev, int bufnum,
+				 int offset, void *buf, int count)
 {
 	struct arcnet_local *lp = netdev_priv(dev);
 	void __iomem *memaddr = lp->mem_start + bufnum * 512 + offset;
-	TIME("memcpy_toio", count, memcpy_toio(memaddr, buf, count));
+
+	TIME(dev, "memcpy_toio", count, memcpy_toio(memaddr, buf, count));
 }
 
-
-static void com90xx_copy_from_card(struct net_device *dev, int bufnum, int offset,
-				   void *buf, int count)
+static void com90xx_copy_from_card(struct net_device *dev, int bufnum,
+				   int offset, void *buf, int count)
 {
 	struct arcnet_local *lp = netdev_priv(dev);
 	void __iomem *memaddr = lp->mem_start + bufnum * 512 + offset;
-	TIME("memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
-}
 
+	TIME(dev, "memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
+}
 
 MODULE_LICENSE("GPL");
 
@@ -664,7 +670,8 @@
 		free_irq(dev->irq, dev);
 		iounmap(lp->mem_start);
 		release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
-		release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
+		release_mem_region(dev->mem_start,
+				   dev->mem_end - dev->mem_start + 1);
 		free_netdev(dev);
 	}
 }
@@ -679,13 +686,13 @@
 
 	s = get_options(s, 8, ints);
 	if (!ints[0] && !*s) {
-		printk("com90xx: Disabled.\n");
+		pr_notice("Disabled\n");
 		return 1;
 	}
 
 	switch (ints[0]) {
 	default:		/* ERROR */
-		printk("com90xx: Too many arguments.\n");
+		pr_err("Too many arguments\n");
 	case 3:		/* Mem address */
 		shmem = ints[3];
 	case 2:		/* IRQ */
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index f81db40..4b1a754 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -1,6 +1,6 @@
 /*
  * Linux ARCnet driver - RFC1051 ("simple" standard) packet encapsulation
- * 
+ *
  * Written 1994-1999 by Avery Pennarun.
  * Derived from skeleton.c by Donald Becker.
  *
@@ -23,6 +23,9 @@
  *
  * **********************
  */
+
+#define pr_fmt(fmt) "arcnet:" KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/init.h>
@@ -30,10 +33,8 @@
 #include <net/arp.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
-#include <linux/arcdevice.h>
 
-#define VERSION "arcnet: RFC1051 \"simple standard\" (`s') encapsulation support loaded.\n"
-
+#include "arcdevice.h"
 
 static __be16 type_trans(struct sk_buff *skb, struct net_device *dev);
 static void rx(struct net_device *dev, int bufnum,
@@ -43,9 +44,7 @@
 static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
 		      int bufnum);
 
-
-static struct ArcProto rfc1051_proto =
-{
+static struct ArcProto rfc1051_proto = {
 	.suffix		= 's',
 	.mtu		= XMTU - RFC1051_HDR_SIZE,
 	.is_ip          = 1,
@@ -56,10 +55,9 @@
 	.ack_tx         = NULL
 };
 
-
 static int __init arcnet_rfc1051_init(void)
 {
-	printk(VERSION);
+	pr_info("%s\n", "RFC1051 \"simple standard\" (`s') encapsulation support loaded");
 
 	arc_proto_map[ARC_P_IP_RFC1051]
 	    = arc_proto_map[ARC_P_ARP_RFC1051]
@@ -82,14 +80,13 @@
 
 MODULE_LICENSE("GPL");
 
-/*
- * Determine a packet's protocol ID.
- * 
+/* Determine a packet's protocol ID.
+ *
  * With ARCnet we have to convert everything to Ethernet-style stuff.
  */
 static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
 {
-	struct archdr *pkt = (struct archdr *) skb->data;
+	struct archdr *pkt = (struct archdr *)skb->data;
 	struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
 	int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
 
@@ -97,9 +94,9 @@
 	skb_reset_mac_header(skb);
 	skb_pull(skb, hdr_size);
 
-	if (pkt->hard.dest == 0)
+	if (pkt->hard.dest == 0) {
 		skb->pkt_type = PACKET_BROADCAST;
-	else if (dev->flags & IFF_PROMISC) {
+	} else if (dev->flags & IFF_PROMISC) {
 		/* if we're not sending to ourselves :) */
 		if (pkt->hard.dest != dev->dev_addr[0])
 			skb->pkt_type = PACKET_OTHERHOST;
@@ -120,7 +117,6 @@
 	return htons(ETH_P_IP);
 }
 
-
 /* packet receiver */
 static void rx(struct net_device *dev, int bufnum,
 	       struct archdr *pkthdr, int length)
@@ -130,7 +126,7 @@
 	struct archdr *pkt = pkthdr;
 	int ofs;
 
-	BUGMSG(D_DURING, "it's a raw packet (length=%d)\n", length);
+	arc_printk(D_DURING, dev, "it's a raw packet (length=%d)\n", length);
 
 	if (length >= MinTU)
 		ofs = 512 - length;
@@ -138,15 +134,14 @@
 		ofs = 256 - length;
 
 	skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
-	if (skb == NULL) {
-		BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
+	if (!skb) {
 		dev->stats.rx_dropped++;
 		return;
 	}
 	skb_put(skb, length + ARC_HDR_SIZE);
 	skb->dev = dev;
 
-	pkt = (struct archdr *) skb->data;
+	pkt = (struct archdr *)skb->data;
 
 	/* up to sizeof(pkt->soft) has already been copied from the card */
 	memcpy(pkt, pkthdr, sizeof(struct archdr));
@@ -155,21 +150,19 @@
 				      pkt->soft.raw + sizeof(pkt->soft),
 				      length - sizeof(pkt->soft));
 
-	BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
+	if (BUGLVL(D_SKB))
+		arcnet_dump_skb(dev, skb, "rx");
 
 	skb->protocol = type_trans(skb, dev);
 	netif_rx(skb);
 }
 
-
-/*
- * Create the ARCnet hard/soft headers for RFC1051.
- */
+/* Create the ARCnet hard/soft headers for RFC1051 */
 static int build_header(struct sk_buff *skb, struct net_device *dev,
 			unsigned short type, uint8_t daddr)
 {
 	int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
-	struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
+	struct archdr *pkt = (struct archdr *)skb_push(skb, hdr_size);
 	struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
 
 	/* set the protocol ID according to RFC1051 */
@@ -181,29 +174,26 @@
 		soft->proto = ARC_P_ARP_RFC1051;
 		break;
 	default:
-		BUGMSG(D_NORMAL, "RFC1051: I don't understand protocol %d (%Xh)\n",
-		       type, type);
+		arc_printk(D_NORMAL, dev, "RFC1051: I don't understand protocol %d (%Xh)\n",
+			   type, type);
 		dev->stats.tx_errors++;
 		dev->stats.tx_aborted_errors++;
 		return 0;
 	}
 
-
-	/*
-	 * Set the source hardware address.
+	/* Set the source hardware address.
 	 *
 	 * This is pretty pointless for most purposes, but it can help in
-	 * debugging.  ARCnet does not allow us to change the source address in
-	 * the actual packet sent)
+	 * debugging.  ARCnet does not allow us to change the source address
+	 * in the actual packet sent.
 	 */
 	pkt->hard.source = *dev->dev_addr;
 
 	/* see linux/net/ethernet/eth.c to see where I got the following */
 
 	if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
-		/* 
-		 * FIXME: fill in the last byte of the dest ipaddr here to better
-		 * comply with RFC1051 in "noarp" mode.
+		/* FIXME: fill in the last byte of the dest ipaddr here to
+		 * better comply with RFC1051 in "noarp" mode.
 		 */
 		pkt->hard.dest = 0;
 		return hdr_size;
@@ -214,7 +204,6 @@
 	return hdr_size;	/* success */
 }
 
-
 static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
 		      int bufnum)
 {
@@ -222,15 +211,16 @@
 	struct arc_hardware *hard = &pkt->hard;
 	int ofs;
 
-	BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n",
-	       lp->next_tx, lp->cur_tx, bufnum);
+	arc_printk(D_DURING, dev, "prepare_tx: txbufs=%d/%d/%d\n",
+		   lp->next_tx, lp->cur_tx, bufnum);
 
-	length -= ARC_HDR_SIZE;	/* hard header is not included in packet length */
+	/* hard header is not included in packet length */
+	length -= ARC_HDR_SIZE;
 
 	if (length > XMTU) {
 		/* should never happen! other people already check for this. */
-		BUGMSG(D_NORMAL, "Bug!  prepare_tx with size %d (> %d)\n",
-		       length, XMTU);
+		arc_printk(D_NORMAL, dev, "Bug!  prepare_tx with size %d (> %d)\n",
+			   length, XMTU);
 		length = XMTU;
 	}
 	if (length > MinTU) {
@@ -239,8 +229,9 @@
 	} else if (length > MTU) {
 		hard->offset[0] = 0;
 		hard->offset[1] = ofs = 512 - length - 3;
-	} else
+	} else {
 		hard->offset[0] = ofs = 256 - length;
+	}
 
 	lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
 	lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft, length);
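
The offset arithmetic in prepare_tx() (and in rx()) simply places the payload at the end of the 256- or 512-byte ARCnet buffer. A minimal sketch of that selection, assuming the usual arcdevice.h limits (MTU = 253, MinTU = 257, XMTU = 508) rather than quoting them from this patch:

    /* sketch only: mirrors the prepare_tx() offset choice above */
    static int arcnet_payload_offset(int length)
    {
            if (length > 257)               /* > MinTU: long packet */
                    return 512 - length;
            else if (length > 253)          /* > MTU: exception packet */
                    return 512 - length - 3;
            else                            /* short packet */
                    return 256 - length;
    }
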
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index b71431a..566da5e 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -1,6 +1,6 @@
 /*
  * Linux ARCnet driver - RFC1201 (standard) packet encapsulation
- * 
+ *
  * Written 1994-1999 by Avery Pennarun.
  * Derived from skeleton.c by Donald Becker.
  *
@@ -23,17 +23,19 @@
  *
  * **********************
  */
+
+#define pr_fmt(fmt) "arcnet:" KBUILD_MODNAME ": " fmt
+
 #include <linux/gfp.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
-#include <linux/arcdevice.h>
+
+#include "arcdevice.h"
 
 MODULE_LICENSE("GPL");
-#define VERSION "arcnet: RFC1201 \"standard\" (`a') encapsulation support loaded.\n"
-
 
 static __be16 type_trans(struct sk_buff *skb, struct net_device *dev);
 static void rx(struct net_device *dev, int bufnum,
@@ -44,8 +46,7 @@
 		      int bufnum);
 static int continue_tx(struct net_device *dev, int bufnum);
 
-static struct ArcProto rfc1201_proto =
-{
+static struct ArcProto rfc1201_proto = {
 	.suffix		= 'a',
 	.mtu		= 1500,	/* could be more, but some receivers can't handle it... */
 	.is_ip          = 1,    /* This is for sending IP and ARP packages */
@@ -56,10 +57,9 @@
 	.ack_tx         = NULL
 };
 
-
 static int __init arcnet_rfc1201_init(void)
 {
-	printk(VERSION);
+	pr_info("%s\n", "RFC1201 \"standard\" (`a') encapsulation support loaded");
 
 	arc_proto_map[ARC_P_IP]
 	    = arc_proto_map[ARC_P_IPV6]
@@ -84,14 +84,13 @@
 module_init(arcnet_rfc1201_init);
 module_exit(arcnet_rfc1201_exit);
 
-/*
- * Determine a packet's protocol ID.
- * 
+/* Determine a packet's protocol ID.
+ *
  * With ARCnet we have to convert everything to Ethernet-style stuff.
  */
 static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
 {
-	struct archdr *pkt = (struct archdr *) skb->data;
+	struct archdr *pkt = (struct archdr *)skb->data;
 	struct arc_rfc1201 *soft = &pkt->soft.rfc1201;
 	int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE;
 
@@ -99,9 +98,9 @@
 	skb_reset_mac_header(skb);
 	skb_pull(skb, hdr_size);
 
-	if (pkt->hard.dest == 0)
+	if (pkt->hard.dest == 0) {
 		skb->pkt_type = PACKET_BROADCAST;
-	else if (dev->flags & IFF_PROMISC) {
+	} else if (dev->flags & IFF_PROMISC) {
 		/* if we're not sending to ourselves :) */
 		if (pkt->hard.dest != dev->dev_addr[0])
 			skb->pkt_type = PACKET_OTHERHOST;
@@ -129,7 +128,6 @@
 	return htons(ETH_P_IP);
 }
 
-
 /* packet receiver */
 static void rx(struct net_device *dev, int bufnum,
 	       struct archdr *pkthdr, int length)
@@ -141,7 +139,8 @@
 	int saddr = pkt->hard.source, ofs;
 	struct Incoming *in = &lp->rfc1201.incoming[saddr];
 
-	BUGMSG(D_DURING, "it's an RFC1201 packet (length=%d)\n", length);
+	arc_printk(D_DURING, dev, "it's an RFC1201 packet (length=%d)\n",
+		   length);
 
 	if (length >= MinTU)
 		ofs = 512 - length;
@@ -149,11 +148,11 @@
 		ofs = 256 - length;
 
 	if (soft->split_flag == 0xFF) {		/* Exception Packet */
-		if (length >= 4 + RFC1201_HDR_SIZE)
-			BUGMSG(D_DURING, "compensating for exception packet\n");
-		else {
-			BUGMSG(D_EXTRA, "short RFC1201 exception packet from %02Xh",
-			       saddr);
+		if (length >= 4 + RFC1201_HDR_SIZE) {
+			arc_printk(D_DURING, dev, "compensating for exception packet\n");
+		} else {
+			arc_printk(D_EXTRA, dev, "short RFC1201 exception packet from %02Xh",
+				   saddr);
 			return;
 		}
 
@@ -164,12 +163,13 @@
 				      soft, sizeof(pkt->soft));
 	}
 	if (!soft->split_flag) {	/* not split */
-		BUGMSG(D_RX, "incoming is not split (splitflag=%d)\n",
-		       soft->split_flag);
+		arc_printk(D_RX, dev, "incoming is not split (splitflag=%d)\n",
+			   soft->split_flag);
 
 		if (in->skb) {	/* already assembling one! */
-			BUGMSG(D_EXTRA, "aborting assembly (seq=%d) for unsplit packet (splitflag=%d, seq=%d)\n",
-			 in->sequence, soft->split_flag, soft->sequence);
+			arc_printk(D_EXTRA, dev, "aborting assembly (seq=%d) for unsplit packet (splitflag=%d, seq=%d)\n",
+				   in->sequence, soft->split_flag,
+				   soft->sequence);
 			lp->rfc1201.aborted_seq = soft->sequence;
 			dev_kfree_skb_irq(in->skb);
 			dev->stats.rx_errors++;
@@ -179,82 +179,86 @@
 		in->sequence = soft->sequence;
 
 		skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
-		if (skb == NULL) {
-			BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
+		if (!skb) {
 			dev->stats.rx_dropped++;
 			return;
 		}
 		skb_put(skb, length + ARC_HDR_SIZE);
 		skb->dev = dev;
 
-		pkt = (struct archdr *) skb->data;
+		pkt = (struct archdr *)skb->data;
 		soft = &pkt->soft.rfc1201;
 
-		/* up to sizeof(pkt->soft) has already been copied from the card */
+		/* up to sizeof(pkt->soft) has already
+		 * been copied from the card
+		 */
 		memcpy(pkt, pkthdr, sizeof(struct archdr));
 		if (length > sizeof(pkt->soft))
-			lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
-				       pkt->soft.raw + sizeof(pkt->soft),
+			lp->hw.copy_from_card(dev, bufnum,
+					      ofs + sizeof(pkt->soft),
+					      pkt->soft.raw + sizeof(pkt->soft),
 					      length - sizeof(pkt->soft));
 
-		/*
-		 * ARP packets have problems when sent from some DOS systems: the
-		 * source address is always 0!  So we take the hardware source addr
-		 * (which is impossible to fumble) and insert it ourselves.
+		/* ARP packets have problems when sent from some DOS systems:
+		 * the source address is always 0!
+		 * So we take the hardware source addr (which is impossible
+		 * to fumble) and insert it ourselves.
 		 */
 		if (soft->proto == ARC_P_ARP) {
-			struct arphdr *arp = (struct arphdr *) soft->payload;
+			struct arphdr *arp = (struct arphdr *)soft->payload;
 
 			/* make sure addresses are the right length */
 			if (arp->ar_hln == 1 && arp->ar_pln == 4) {
-				uint8_t *cptr = (uint8_t *) arp + sizeof(struct arphdr);
+				uint8_t *cptr = (uint8_t *)arp + sizeof(struct arphdr);
 
 				if (!*cptr) {	/* is saddr = 00? */
-					BUGMSG(D_EXTRA,
-					       "ARP source address was 00h, set to %02Xh.\n",
-					       saddr);
+					arc_printk(D_EXTRA, dev,
+						   "ARP source address was 00h, set to %02Xh\n",
+						   saddr);
 					dev->stats.rx_crc_errors++;
 					*cptr = saddr;
 				} else {
-					BUGMSG(D_DURING, "ARP source address (%Xh) is fine.\n",
-					       *cptr);
+					arc_printk(D_DURING, dev, "ARP source address (%Xh) is fine.\n",
+						   *cptr);
 				}
 			} else {
-				BUGMSG(D_NORMAL, "funny-shaped ARP packet. (%Xh, %Xh)\n",
-				       arp->ar_hln, arp->ar_pln);
+				arc_printk(D_NORMAL, dev, "funny-shaped ARP packet. (%Xh, %Xh)\n",
+					   arp->ar_hln, arp->ar_pln);
 				dev->stats.rx_errors++;
 				dev->stats.rx_crc_errors++;
 			}
 		}
-		BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
+		if (BUGLVL(D_SKB))
+			arcnet_dump_skb(dev, skb, "rx");
 
 		skb->protocol = type_trans(skb, dev);
 		netif_rx(skb);
 	} else {		/* split packet */
-		/*
-		 * NOTE: MSDOS ARP packet correction should only need to apply to
-		 * unsplit packets, since ARP packets are so short.
+		/* NOTE: MSDOS ARP packet correction should only need to
+		 * apply to unsplit packets, since ARP packets are so short.
 		 *
-		 * My interpretation of the RFC1201 document is that if a packet is
-		 * received out of order, the entire assembly process should be
-		 * aborted.
+		 * My interpretation of the RFC1201 document is that if a
+		 * packet is received out of order, the entire assembly
+		 * process should be aborted.
 		 *
-		 * The RFC also mentions "it is possible for successfully received
-		 * packets to be retransmitted." As of 0.40 all previously received
-		 * packets are allowed, not just the most recent one.
+		 * The RFC also mentions "it is possible for successfully
+		 * received packets to be retransmitted." As of 0.40 all
+		 * previously received packets are allowed, not just the
+		 * most recent one.
 		 *
-		 * We allow multiple assembly processes, one for each ARCnet card
-		 * possible on the network.  Seems rather like a waste of memory,
-		 * but there's no other way to be reliable.
+		 * We allow multiple assembly processes, one for each
+		 * ARCnet card possible on the network.
+		 * Seems rather like a waste of memory, but there's no
+		 * other way to be reliable.
 		 */
 
-		BUGMSG(D_RX, "packet is split (splitflag=%d, seq=%d)\n",
-		       soft->split_flag, in->sequence);
+		arc_printk(D_RX, dev, "packet is split (splitflag=%d, seq=%d)\n",
+			   soft->split_flag, in->sequence);
 
 		if (in->skb && in->sequence != soft->sequence) {
-			BUGMSG(D_EXTRA, "wrong seq number (saddr=%d, expected=%d, seq=%d, splitflag=%d)\n",
-			       saddr, in->sequence, soft->sequence,
-			       soft->split_flag);
+			arc_printk(D_EXTRA, dev, "wrong seq number (saddr=%d, expected=%d, seq=%d, splitflag=%d)\n",
+				   saddr, in->sequence, soft->sequence,
+				   soft->split_flag);
 			dev_kfree_skb_irq(in->skb);
 			in->skb = NULL;
 			dev->stats.rx_errors++;
@@ -262,24 +266,23 @@
 			in->lastpacket = in->numpackets = 0;
 		}
 		if (soft->split_flag & 1) {	/* first packet in split */
-			BUGMSG(D_RX, "brand new splitpacket (splitflag=%d)\n",
-			       soft->split_flag);
+			arc_printk(D_RX, dev, "brand new splitpacket (splitflag=%d)\n",
+				   soft->split_flag);
 			if (in->skb) {	/* already assembling one! */
-				BUGMSG(D_EXTRA, "aborting previous (seq=%d) assembly "
-				       "(splitflag=%d, seq=%d)\n",
-				       in->sequence, soft->split_flag,
-				       soft->sequence);
+				arc_printk(D_EXTRA, dev, "aborting previous (seq=%d) assembly (splitflag=%d, seq=%d)\n",
+					   in->sequence, soft->split_flag,
+					   soft->sequence);
 				dev->stats.rx_errors++;
 				dev->stats.rx_missed_errors++;
 				dev_kfree_skb_irq(in->skb);
 			}
 			in->sequence = soft->sequence;
-			in->numpackets = ((unsigned) soft->split_flag >> 1) + 2;
+			in->numpackets = ((unsigned)soft->split_flag >> 1) + 2;
 			in->lastpacket = 1;
 
 			if (in->numpackets > 16) {
-				BUGMSG(D_EXTRA, "incoming packet more than 16 segments; dropping. (splitflag=%d)\n",
-				       soft->split_flag);
+				arc_printk(D_EXTRA, dev, "incoming packet more than 16 segments; dropping. (splitflag=%d)\n",
+					   soft->split_flag);
 				lp->rfc1201.aborted_seq = soft->sequence;
 				dev->stats.rx_errors++;
 				dev->stats.rx_length_errors++;
@@ -287,14 +290,14 @@
 			}
 			in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE,
 						  GFP_ATOMIC);
-			if (skb == NULL) {
-				BUGMSG(D_NORMAL, "(split) memory squeeze, dropping packet.\n");
+			if (!skb) {
+				arc_printk(D_NORMAL, dev, "(split) memory squeeze, dropping packet.\n");
 				lp->rfc1201.aborted_seq = soft->sequence;
 				dev->stats.rx_dropped++;
 				return;
 			}
 			skb->dev = dev;
-			pkt = (struct archdr *) skb->data;
+			pkt = (struct archdr *)skb->data;
 			soft = &pkt->soft.rfc1201;
 
 			memcpy(pkt, pkthdr, ARC_HDR_SIZE + RFC1201_HDR_SIZE);
@@ -302,37 +305,37 @@
 
 			soft->split_flag = 0;	/* end result won't be split */
 		} else {	/* not first packet */
-			int packetnum = ((unsigned) soft->split_flag >> 1) + 1;
+			int packetnum = ((unsigned)soft->split_flag >> 1) + 1;
 
-			/*
-			 * if we're not assembling, there's no point trying to
+			/* if we're not assembling, there's no point trying to
 			 * continue.
 			 */
 			if (!in->skb) {
 				if (lp->rfc1201.aborted_seq != soft->sequence) {
-					BUGMSG(D_EXTRA, "can't continue split without starting "
-					       "first! (splitflag=%d, seq=%d, aborted=%d)\n",
-					soft->split_flag, soft->sequence,
-					       lp->rfc1201.aborted_seq);
+					arc_printk(D_EXTRA, dev, "can't continue split without starting first! (splitflag=%d, seq=%d, aborted=%d)\n",
+						   soft->split_flag,
+						   soft->sequence,
+						   lp->rfc1201.aborted_seq);
 					dev->stats.rx_errors++;
 					dev->stats.rx_missed_errors++;
 				}
 				return;
 			}
 			in->lastpacket++;
-			if (packetnum != in->lastpacket) {	/* not the right flag! */
+			/* if not the right flag */
+			if (packetnum != in->lastpacket) {
 				/* harmless duplicate? ignore. */
 				if (packetnum <= in->lastpacket - 1) {
-					BUGMSG(D_EXTRA, "duplicate splitpacket ignored! (splitflag=%d)\n",
-					       soft->split_flag);
+					arc_printk(D_EXTRA, dev, "duplicate splitpacket ignored! (splitflag=%d)\n",
+						   soft->split_flag);
 					dev->stats.rx_errors++;
 					dev->stats.rx_frame_errors++;
 					return;
 				}
 				/* "bad" duplicate, kill reassembly */
-				BUGMSG(D_EXTRA, "out-of-order splitpacket, reassembly "
-				       "(seq=%d) aborted (splitflag=%d, seq=%d)\n",
-				       in->sequence, soft->split_flag, soft->sequence);
+				arc_printk(D_EXTRA, dev, "out-of-order splitpacket, reassembly (seq=%d) aborted (splitflag=%d, seq=%d)\n",
+					   in->sequence, soft->split_flag,
+					   soft->sequence);
 				lp->rfc1201.aborted_seq = soft->sequence;
 				dev_kfree_skb_irq(in->skb);
 				in->skb = NULL;
@@ -341,7 +344,7 @@
 				in->lastpacket = in->numpackets = 0;
 				return;
 			}
-			pkt = (struct archdr *) in->skb->data;
+			pkt = (struct archdr *)in->skb->data;
 			soft = &pkt->soft.rfc1201;
 		}
 
@@ -357,11 +360,12 @@
 			in->skb = NULL;
 			in->lastpacket = in->numpackets = 0;
 
-	    BUGMSG(D_SKB_SIZE, "skb: received %d bytes from %02X (unsplit)\n",
-    		skb->len, pkt->hard.source);
-	    BUGMSG(D_SKB_SIZE, "skb: received %d bytes from %02X (split)\n",
-    		skb->len, pkt->hard.source);
-			BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
+			arc_printk(D_SKB_SIZE, dev, "skb: received %d bytes from %02X (unsplit)\n",
+				   skb->len, pkt->hard.source);
+			arc_printk(D_SKB_SIZE, dev, "skb: received %d bytes from %02X (split)\n",
+				   skb->len, pkt->hard.source);
+			if (BUGLVL(D_SKB))
+				arcnet_dump_skb(dev, skb, "rx");
 
 			skb->protocol = type_trans(skb, dev);
 			netif_rx(skb);
@@ -369,14 +373,13 @@
 	}
 }
 
-
 /* Create the ARCnet hard/soft headers for RFC1201. */
 static int build_header(struct sk_buff *skb, struct net_device *dev,
 			unsigned short type, uint8_t daddr)
 {
 	struct arcnet_local *lp = netdev_priv(dev);
 	int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE;
-	struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
+	struct archdr *pkt = (struct archdr *)skb_push(skb, hdr_size);
 	struct arc_rfc1201 *soft = &pkt->soft.rfc1201;
 
 	/* set the protocol ID according to RFC1201 */
@@ -402,19 +405,18 @@
 		soft->proto = ARC_P_ATALK;
 		break;
 	default:
-		BUGMSG(D_NORMAL, "RFC1201: I don't understand protocol %d (%Xh)\n",
-		       type, type);
+		arc_printk(D_NORMAL, dev, "RFC1201: I don't understand protocol %d (%Xh)\n",
+			   type, type);
 		dev->stats.tx_errors++;
 		dev->stats.tx_aborted_errors++;
 		return 0;
 	}
 
-	/*
-	 * Set the source hardware address.
+	/* Set the source hardware address.
 	 *
 	 * This is pretty pointless for most purposes, but it can help in
-	 * debugging.  ARCnet does not allow us to change the source address in
-	 * the actual packet sent)
+	 * debugging.  ARCnet does not allow us to change the source address
+	 * in the actual packet sent.
 	 */
 	pkt->hard.source = *dev->dev_addr;
 
@@ -424,10 +426,10 @@
 	/* see linux/net/ethernet/eth.c to see where I got the following */
 
 	if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
-		/* 
-		 * FIXME: fill in the last byte of the dest ipaddr here to better
-		 * comply with RFC1051 in "noarp" mode.  For now, always broadcasting
-		 * will probably at least get packets sent out :)
+		/* FIXME: fill in the last byte of the dest ipaddr here
+		 * to better comply with RFC1051 in "noarp" mode.
+		 * For now, always broadcasting will probably at least get
+		 * packets sent out :)
 		 */
 		pkt->hard.dest = 0;
 		return hdr_size;
@@ -437,7 +439,6 @@
 	return hdr_size;
 }
 
-
 static void load_pkt(struct net_device *dev, struct arc_hardware *hard,
 		     struct arc_rfc1201 *soft, int softlen, int bufnum)
 {
@@ -461,8 +462,9 @@
 		hard->offset[1] = ofs - RFC1201_HDR_SIZE;
 		lp->hw.copy_to_card(dev, bufnum, ofs - RFC1201_HDR_SIZE,
 				    &excsoft, RFC1201_HDR_SIZE);
-	} else
+	} else {
 		hard->offset[0] = ofs = 256 - softlen;
+	}
 
 	lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
 	lp->hw.copy_to_card(dev, bufnum, ofs, soft, softlen);
@@ -470,7 +472,6 @@
 	lp->lastload_dest = hard->dest;
 }
 
-
 static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
 		      int bufnum)
 {
@@ -478,11 +479,11 @@
 	const int maxsegsize = XMTU - RFC1201_HDR_SIZE;
 	struct Outgoing *out;
 
+	arc_printk(D_DURING, dev, "prepare_tx: txbufs=%d/%d/%d\n",
+		   lp->next_tx, lp->cur_tx, bufnum);
 
-	BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n",
-	       lp->next_tx, lp->cur_tx, bufnum);
-
-	length -= ARC_HDR_SIZE;	/* hard header is not included in packet length */
+	/* hard header is not included in packet length */
+	length -= ARC_HDR_SIZE;
 	pkt->soft.rfc1201.split_flag = 0;
 
 	/* need to do a split packet? */
@@ -494,9 +495,9 @@
 		out->numsegs = (out->dataleft + maxsegsize - 1) / maxsegsize;
 		out->segnum = 0;
 
-		BUGMSG(D_DURING, "rfc1201 prep_tx: ready for %d-segment split "
-		       "(%d bytes, seq=%d)\n", out->numsegs, out->length,
-		       pkt->soft.rfc1201.sequence);
+		arc_printk(D_DURING, dev, "rfc1201 prep_tx: ready for %d-segment split (%d bytes, seq=%d)\n",
+			   out->numsegs, out->length,
+			   pkt->soft.rfc1201.sequence);
 
 		return 0;	/* not done */
 	}
@@ -506,7 +507,6 @@
 	return 1;		/* done */
 }
 
-
 static int continue_tx(struct net_device *dev, int bufnum)
 {
 	struct arcnet_local *lp = netdev_priv(dev);
@@ -516,9 +516,9 @@
 	int maxsegsize = XMTU - RFC1201_HDR_SIZE;
 	int seglen;
 
-	BUGMSG(D_DURING,
-	  "rfc1201 continue_tx: loading segment %d(+1) of %d (seq=%d)\n",
-	       out->segnum, out->numsegs, soft->sequence);
+	arc_printk(D_DURING, dev,
+		   "rfc1201 continue_tx: loading segment %d(+1) of %d (seq=%d)\n",
+		   out->segnum, out->numsegs, soft->sequence);
 
 	/* the "new" soft header comes right before the data chunk */
 	newsoft = (struct arc_rfc1201 *)
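
For the split-packet handling above: a split_flag of 0 means an unsplit packet and 0xFF marks an exception packet; otherwise the first fragment has bit 0 set and encodes the fragment count, while later fragments encode their ordinal. A small decoding sketch of the same arithmetic used in rx():

    /* sketch only: decode RFC1201 split_flag the way rx() above does */
    static void rfc1201_decode_split(u8 split_flag, bool *first, int *value)
    {
            *first = split_flag & 1;
            if (*first)
                    *value = ((unsigned int)split_flag >> 1) + 2;   /* numpackets */
            else
                    *value = ((unsigned int)split_flag >> 1) + 1;   /* packetnum */
    }
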
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index e8c96b8..6d04183 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -129,6 +129,16 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called rcar_can.
 
+config CAN_SUN4I
+	tristate "Allwinner A10 CAN controller"
+	depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
+	---help---
+	  Say Y here if you want to use the CAN controller found on
+	  Allwinner A10/A20 SoCs.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called sun4i_can.
+
 config CAN_XILINXCAN
 	tristate "Xilinx CAN"
 	depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index c533c62..1f21cef 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -27,6 +27,7 @@
 obj-$(CONFIG_PCH_CAN)		+= pch_can.o
 obj-$(CONFIG_CAN_GRCAN)		+= grcan.o
 obj-$(CONFIG_CAN_RCAR)		+= rcar_can.o
+obj-$(CONFIG_CAN_SUN4I)		+= sun4i_can.o
 obj-$(CONFIG_CAN_XILINXCAN)	+= xilinx_can.o
 
 subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index c83f0f03..868fe94 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -26,12 +26,8 @@
 #include <linux/can/led.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -63,10 +59,10 @@
 #define FLEXCAN_MCR_LPRIO_EN		BIT(13)
 #define FLEXCAN_MCR_AEN			BIT(12)
 #define FLEXCAN_MCR_MAXMB(x)		((x) & 0x7f)
-#define FLEXCAN_MCR_IDAM_A		(0 << 8)
-#define FLEXCAN_MCR_IDAM_B		(1 << 8)
-#define FLEXCAN_MCR_IDAM_C		(2 << 8)
-#define FLEXCAN_MCR_IDAM_D		(3 << 8)
+#define FLEXCAN_MCR_IDAM_A		(0x0 << 8)
+#define FLEXCAN_MCR_IDAM_B		(0x1 << 8)
+#define FLEXCAN_MCR_IDAM_C		(0x2 << 8)
+#define FLEXCAN_MCR_IDAM_D		(0x3 << 8)
 
 /* FLEXCAN control register (CANCTRL) bits */
 #define FLEXCAN_CTRL_PRESDIV(x)		(((x) & 0xff) << 24)
@@ -161,7 +157,7 @@
 #define FLEXCAN_MB_CODE_RX_INACTIVE	(0x0 << 24)
 #define FLEXCAN_MB_CODE_RX_EMPTY	(0x4 << 24)
 #define FLEXCAN_MB_CODE_RX_FULL		(0x2 << 24)
-#define FLEXCAN_MB_CODE_RX_OVERRRUN	(0x6 << 24)
+#define FLEXCAN_MB_CODE_RX_OVERRUN	(0x6 << 24)
 #define FLEXCAN_MB_CODE_RX_RANSWER	(0xa << 24)
 
 #define FLEXCAN_MB_CODE_TX_INACTIVE	(0x8 << 24)
@@ -175,12 +171,9 @@
 #define FLEXCAN_MB_CNT_LENGTH(x)	(((x) & 0xf) << 16)
 #define FLEXCAN_MB_CNT_TIMESTAMP(x)	((x) & 0xffff)
 
-#define FLEXCAN_MB_CODE_MASK		(0xf0ffffff)
+#define FLEXCAN_TIMEOUT_US		(50)
 
-#define FLEXCAN_TIMEOUT_US             (50)
-
-/*
- * FLEXCAN hardware feature flags
+/* FLEXCAN hardware feature flags
  *
  * Below is some version info we got:
  *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT Memory err RTR re-
@@ -194,9 +187,9 @@
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
-#define FLEXCAN_HAS_V10_FEATURES	BIT(1) /* For core version >= 10 */
-#define FLEXCAN_HAS_BROKEN_ERR_STATE	BIT(2) /* [TR]WRN_INT not connected */
-#define FLEXCAN_HAS_MECR_FEATURES	BIT(3) /* Memory error detection */
+#define FLEXCAN_QUIRK_BROKEN_ERR_STATE	BIT(1) /* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_DISABLE_RXFG	BIT(2) /* Disable RX FIFO Global mask */
+#define FLEXCAN_QUIRK_DISABLE_MECR	BIT(3) /* Disable Memory error detection */
 
 /* Structure of the message buffer */
 struct flexcan_mb {
@@ -228,7 +221,7 @@
 	u32 rxfgmask;		/* 0x48 */
 	u32 rxfir;		/* 0x4c */
 	u32 _reserved3[12];	/* 0x50 */
-	struct flexcan_mb cantxfg[64];	/* 0x80 */
+	struct flexcan_mb mb[64];	/* 0x80 */
 	/* FIFO-mode:
 	 *			MB
 	 * 0x080...0x08f	0	RX message buffer
@@ -236,7 +229,7 @@
 	 * 0x0e0...0x0ff	6-7	8 entry ID table
 	 *				(mx25, mx28, mx35, mx53)
 	 * 0x0e0...0x2df	6-7..37	8..128 entry ID table
-	 *			  	size conf'ed via ctrl2::RFFN
+	 *				size conf'ed via ctrl2::RFFN
 	 *				(mx6, vf610)
 	 */
 	u32 _reserved4[408];
@@ -251,14 +244,14 @@
 };
 
 struct flexcan_devtype_data {
-	u32 features;	/* hardware controller features */
+	u32 quirks;		/* quirks needed for different IP cores */
 };
 
 struct flexcan_priv {
 	struct can_priv can;
 	struct napi_struct napi;
 
-	void __iomem *base;
+	struct flexcan_regs __iomem *regs;
 	u32 reg_esr;
 	u32 reg_ctrl_default;
 
@@ -270,14 +263,17 @@
 };
 
 static struct flexcan_devtype_data fsl_p1010_devtype_data = {
-	.features = FLEXCAN_HAS_BROKEN_ERR_STATE,
+	.quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE,
 };
+
 static struct flexcan_devtype_data fsl_imx28_devtype_data;
+
 static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
-	.features = FLEXCAN_HAS_V10_FEATURES,
+	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG,
 };
+
 static struct flexcan_devtype_data fsl_vf610_devtype_data = {
-	.features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_MECR_FEATURES,
+	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_DISABLE_MECR,
 };
 
 static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -292,11 +288,10 @@
 	.brp_inc = 1,
 };
 
-/*
- * Abstract off the read/write for arm versus ppc. This
+/* Abstract off the read/write for arm versus ppc. This
  * assumes that PPC uses big-endian registers and everything
  * else uses little-endian registers, independent of CPU
- * endianess.
+ * endianness.
  */
 #if defined(CONFIG_PPC)
 static inline u32 flexcan_read(void __iomem *addr)
@@ -345,7 +340,7 @@
 
 static int flexcan_chip_enable(struct flexcan_priv *priv)
 {
-	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_regs __iomem *regs = priv->regs;
 	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 	u32 reg;
 
@@ -364,7 +359,7 @@
 
 static int flexcan_chip_disable(struct flexcan_priv *priv)
 {
-	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_regs __iomem *regs = priv->regs;
 	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 	u32 reg;
 
@@ -383,7 +378,7 @@
 
 static int flexcan_chip_freeze(struct flexcan_priv *priv)
 {
-	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_regs __iomem *regs = priv->regs;
 	unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
 	u32 reg;
 
@@ -402,7 +397,7 @@
 
 static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
 {
-	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_regs __iomem *regs = priv->regs;
 	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 	u32 reg;
 
@@ -421,7 +416,7 @@
 
 static int flexcan_chip_softreset(struct flexcan_priv *priv)
 {
-	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_regs __iomem *regs = priv->regs;
 	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 
 	flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
@@ -434,12 +429,11 @@
 	return 0;
 }
 
-
 static int __flexcan_get_berr_counter(const struct net_device *dev,
 				      struct can_berr_counter *bec)
 {
 	const struct flexcan_priv *priv = netdev_priv(dev);
-	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 reg = flexcan_read(&regs->ecr);
 
 	bec->txerr = (reg >> 0) & 0xff;
@@ -474,9 +468,10 @@
 static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	const struct flexcan_priv *priv = netdev_priv(dev);
-	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_regs __iomem *regs = priv->regs;
 	struct can_frame *cf = (struct can_frame *)skb->data;
 	u32 can_id;
+	u32 data;
 	u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16);
 
 	if (can_dropped_invalid_skb(dev, skb))
@@ -495,26 +490,26 @@
 		ctrl |= FLEXCAN_MB_CNT_RTR;
 
 	if (cf->can_dlc > 0) {
-		u32 data = be32_to_cpup((__be32 *)&cf->data[0]);
-		flexcan_write(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[0]);
+		data = be32_to_cpup((__be32 *)&cf->data[0]);
+		flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[0]);
 	}
 	if (cf->can_dlc > 3) {
-		u32 data = be32_to_cpup((__be32 *)&cf->data[4]);
-		flexcan_write(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[1]);
+		data = be32_to_cpup((__be32 *)&cf->data[4]);
+		flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[1]);
 	}
 
 	can_put_echo_skb(skb, dev, 0);
 
-	flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
-	flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+	flexcan_write(can_id, &regs->mb[FLEXCAN_TX_BUF_ID].can_id);
+	flexcan_write(ctrl, &regs->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
 
 	/* Errata ERR005829 step8:
 	 * Write twice INACTIVE(0x8) code to first MB.
 	 */
 	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-		      &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+		      &regs->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
 	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-		      &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+		      &regs->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
 
 	return NETDEV_TX_OK;
 }
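
In the transmit path above, the CAN payload is written to the mailbox as two big-endian 32-bit words (the be32_to_cpup() calls). A sketch of the equivalent byte packing, for clarity only:

    /* sketch only: pack payload bytes 0..3 (or 4..7) into one data word */
    static u32 flexcan_pack_data_word(const u8 *data)
    {
            return ((u32)data[0] << 24) | ((u32)data[1] << 16) |
                   ((u32)data[2] << 8)  |  (u32)data[3];
    }
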
@@ -597,14 +592,14 @@
 	flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
 	if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
 		tx_state = unlikely(reg_esr & FLEXCAN_ESR_TX_WRN) ?
-			   CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
+			CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
 		rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
-			   CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
+			CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
 		new_state = max(tx_state, rx_state);
 	} else {
 		__flexcan_get_berr_counter(dev, &bec);
 		new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
-			    CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
+			CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
 		rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
 		tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
 	}
@@ -633,8 +628,8 @@
 			      struct can_frame *cf)
 {
 	const struct flexcan_priv *priv = netdev_priv(dev);
-	struct flexcan_regs __iomem *regs = priv->base;
-	struct flexcan_mb __iomem *mb = &regs->cantxfg[0];
+	struct flexcan_regs __iomem *regs = priv->regs;
+	struct flexcan_mb __iomem *mb = &regs->mb[0];
 	u32 reg_ctrl, reg_id;
 
 	reg_ctrl = flexcan_read(&mb->can_ctrl);
@@ -683,12 +678,11 @@
 {
 	struct net_device *dev = napi->dev;
 	const struct flexcan_priv *priv = netdev_priv(dev);
-	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 reg_iflag1, reg_esr;
 	int work_done = 0;
 
-	/*
-	 * The error bits are cleared on read,
+	/* The error bits are cleared on read,
 	 * use saved value from irq handler.
 	 */
 	reg_esr = flexcan_read(&regs->esr) | priv->reg_esr;
@@ -723,17 +717,17 @@
 	struct net_device *dev = dev_id;
 	struct net_device_stats *stats = &dev->stats;
 	struct flexcan_priv *priv = netdev_priv(dev);
-	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 reg_iflag1, reg_esr;
 
 	reg_iflag1 = flexcan_read(&regs->iflag1);
 	reg_esr = flexcan_read(&regs->esr);
+
 	/* ACK all bus error and state change IRQ sources */
 	if (reg_esr & FLEXCAN_ESR_ALL_INT)
 		flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
 
-	/*
-	 * schedule NAPI in case of:
+	/* schedule NAPI in case of:
 	 * - rx IRQ
 	 * - state change IRQ
 	 * - bus error IRQ and bus error reporting is activated
@@ -741,15 +735,14 @@
 	if ((reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) ||
 	    (reg_esr & FLEXCAN_ESR_ERR_STATE) ||
 	    flexcan_has_and_handle_berr(priv, reg_esr)) {
-		/*
-		 * The error bits are cleared on read,
+		/* The error bits are cleared on read,
 		 * save them for later use.
 		 */
 		priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS;
 		flexcan_write(FLEXCAN_IFLAG_DEFAULT &
-			~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->imask1);
+			      ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->imask1);
 		flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
-		       &regs->ctrl);
+			      &regs->ctrl);
 		napi_schedule(&priv->napi);
 	}
 
@@ -765,9 +758,10 @@
 		stats->tx_bytes += can_get_echo_skb(dev, 0);
 		stats->tx_packets++;
 		can_led_event(dev, CAN_LED_EVENT_TX);
-		/* after sending a RTR frame mailbox is in RX mode */
+
+		/* after sending a RTR frame MB is in RX mode */
 		flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-			      &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+			      &regs->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
 		flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
 		netif_wake_queue(dev);
 	}
@@ -779,7 +773,7 @@
 {
 	const struct flexcan_priv *priv = netdev_priv(dev);
 	const struct can_bittiming *bt = &priv->can.bittiming;
-	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 reg;
 
 	reg = flexcan_read(&regs->ctrl);
@@ -813,8 +807,7 @@
 		   flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
 }
 
-/*
- * flexcan_chip_start
+/* flexcan_chip_start
  *
  * this functions is entered with clocks enabled
  *
@@ -822,7 +815,7 @@
 static int flexcan_chip_start(struct net_device *dev)
 {
 	struct flexcan_priv *priv = netdev_priv(dev);
-	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr;
 	int err, i;
 
@@ -838,29 +831,26 @@
 
 	flexcan_set_bittiming(dev);
 
-	/*
-	 * MCR
+	/* MCR
 	 *
 	 * enable freeze
 	 * enable fifo
 	 * halt now
 	 * only supervisor access
 	 * enable warning int
-	 * choose format C
 	 * disable local echo
-	 *
+	 * choose format C
+	 * set max mailbox number
 	 */
 	reg_mcr = flexcan_read(&regs->mcr);
 	reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
 	reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
-		FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
-		FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS |
-		FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
+		FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS |
+		FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
 	netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
 	flexcan_write(reg_mcr, &regs->mcr);
 
-	/*
-	 * CTRL
+	/* CTRL
 	 *
 	 * disable timer sync feature
 	 *
@@ -875,12 +865,12 @@
 	reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
 	reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
 		FLEXCAN_CTRL_ERR_STATE;
-	/*
-	 * enable the "error interrupt" (FLEXCAN_CTRL_ERR_MSK),
+
+	/* enable the "error interrupt" (FLEXCAN_CTRL_ERR_MSK),
 	 * on most Flexcan cores, too. Otherwise we don't get
 	 * any error warning or passive interrupts.
 	 */
-	if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE ||
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE ||
 	    priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
 		reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
 	else
@@ -888,41 +878,41 @@
 
 	/* save for later use */
 	priv->reg_ctrl_default = reg_ctrl;
+	/* leave interrupts disabled for now */
+	reg_ctrl &= ~FLEXCAN_CTRL_ERR_ALL;
 	netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
 	flexcan_write(reg_ctrl, &regs->ctrl);
 
 	/* clear and invalidate all mailboxes first */
-	for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->cantxfg); i++) {
+	for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->mb); i++) {
 		flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
-			      &regs->cantxfg[i].can_ctrl);
+			      &regs->mb[i].can_ctrl);
 	}
 
 	/* Errata ERR005829: mark first TX mailbox as INACTIVE */
 	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-		      &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+		      &regs->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
 
 	/* mark TX mailbox as INACTIVE */
 	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-		      &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+		      &regs->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
 
 	/* acceptance mask/acceptance code (accept everything) */
 	flexcan_write(0x0, &regs->rxgmask);
 	flexcan_write(0x0, &regs->rx14mask);
 	flexcan_write(0x0, &regs->rx15mask);
 
-	if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
 		flexcan_write(0x0, &regs->rxfgmask);
 
-	/*
-	 * On Vybrid, disable memory error detection interrupts
+	/* On Vybrid, disable memory error detection interrupts
 	 * and freeze mode.
 	 * This also works around errata e5295 which generates
 	 * false positive memory errors and put the device in
 	 * freeze mode.
 	 */
-	if (priv->devtype_data->features & FLEXCAN_HAS_MECR_FEATURES) {
-		/*
-		 * Follow the protocol as described in "Detection
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_MECR) {
+		/* Follow the protocol as described in "Detection
 		 * and Correction of Memory Errors" to write to
 		 * MECR register
 		 */
@@ -934,7 +924,7 @@
 		reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
 		flexcan_write(reg_mecr, &regs->mecr);
 		reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
-				FLEXCAN_MECR_FANCEI_MSK);
+			      FLEXCAN_MECR_FANCEI_MSK);
 		flexcan_write(reg_mecr, &regs->mecr);
 	}
 
@@ -949,8 +939,11 @@
 
 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
-	/* enable FIFO interrupts */
+	/* enable interrupts atomically */
+	disable_irq(dev->irq);
+	flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
 	flexcan_write(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
+	enable_irq(dev->irq);
 
 	/* print chip status */
 	netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__,
@@ -965,16 +958,14 @@
 	return err;
 }
 
-/*
- * flexcan_chip_stop
+/* flexcan_chip_stop
  *
  * this functions is entered with clocks enabled
- *
  */
 static void flexcan_chip_stop(struct net_device *dev)
 {
 	struct flexcan_priv *priv = netdev_priv(dev);
-	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_regs __iomem *regs = priv->regs;
 
 	/* freeze + disable module */
 	flexcan_chip_freeze(priv);
@@ -987,8 +978,6 @@
 
 	flexcan_transceiver_disable(priv);
 	priv->can.state = CAN_STATE_STOPPED;
-
-	return;
 }
 
 static int flexcan_open(struct net_device *dev)
@@ -1085,7 +1074,7 @@
 static int register_flexcandev(struct net_device *dev)
 {
 	struct flexcan_priv *priv = netdev_priv(dev);
-	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 reg, err;
 
 	err = clk_prepare_enable(priv->clk_ipg);
@@ -1114,8 +1103,7 @@
 		FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
 	flexcan_write(reg, &regs->mcr);
 
-	/*
-	 * Currently we only support newer versions of this core
+	/* Currently we only support newer versions of this core
 	 * featuring a RX FIFO. Older cores found on some Coldfire
 	 * derivates are not yet supported.
 	 */
@@ -1168,7 +1156,7 @@
 	struct regulator *reg_xceiver;
 	struct resource *mem;
 	struct clk *clk_ipg = NULL, *clk_per = NULL;
-	void __iomem *base;
+	struct flexcan_regs __iomem *regs;
 	int err, irq;
 	u32 clock_freq = 0;
 
@@ -1180,7 +1168,7 @@
 
 	if (pdev->dev.of_node)
 		of_property_read_u32(pdev->dev.of_node,
-						"clock-frequency", &clock_freq);
+				     "clock-frequency", &clock_freq);
 
 	if (!clock_freq) {
 		clk_ipg = devm_clk_get(&pdev->dev, "ipg");
@@ -1202,9 +1190,9 @@
 	if (irq <= 0)
 		return -ENODEV;
 
-	base = devm_ioremap_resource(&pdev->dev, mem);
-	if (IS_ERR(base))
-		return PTR_ERR(base);
+	regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	of_id = of_match_device(flexcan_of_match, &pdev->dev);
 	if (of_id) {
@@ -1232,12 +1220,11 @@
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
 		CAN_CTRLMODE_LISTENONLY	| CAN_CTRLMODE_3_SAMPLES |
 		CAN_CTRLMODE_BERR_REPORTING;
-	priv->base = base;
+	priv->regs = regs;
 	priv->clk_ipg = clk_ipg;
 	priv->clk_per = clk_per;
 	priv->pdata = dev_get_platdata(&pdev->dev);
 	priv->devtype_data = devtype_data;
-
 	priv->reg_xceiver = reg_xceiver;
 
 	netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
@@ -1254,7 +1241,7 @@
 	devm_can_led_init(dev);
 
 	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
-		 priv->base, dev->irq);
+		 priv->regs, dev->irq);
 
 	return 0;
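
The recurring change throughout this file is that the per-SoC devtype data now carries a quirks bitmask instead of broad feature flags, and call sites test individual bits. A hedged sketch of that pattern (flexcan_has_quirk() is a hypothetical helper, not part of this patch; the FLEXCAN_QUIRK_* names are the ones defined above):

    /* sketch only: the quirk-test pattern used by the call sites above */
    static bool flexcan_has_quirk(const struct flexcan_devtype_data *devtype,
                                  u32 quirk)
    {
            return (devtype->quirks & quirk) != 0;
    }
    /* e.g. flexcan_has_quirk(priv->devtype_data, FLEXCAN_QUIRK_DISABLE_MECR) */
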
 
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
new file mode 100644
index 0000000..10d8497
--- /dev/null
+++ b/drivers/net/can/sun4i_can.c
@@ -0,0 +1,857 @@
+/*
+ * sun4i_can.c - CAN bus controller driver for Allwinner SUN4I&SUN7I based SoCs
+ *
+ * Copyright (C) 2013 Peter Chen
+ * Copyright (C) 2015 Gerhard Bertelsmann
+ * All rights reserved.
+ *
+ * Parts of this software are based on (derived from) the SJA1000 code by:
+ *   Copyright (C) 2014 Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
+ *   Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
+ *   Copyright (C) 2002-2007 Volkswagen Group Electronic Research
+ *   Copyright (C) 2003 Matthias Brukner, Trajet Gmbh, Rebenring 33,
+ *   38106 Braunschweig, GERMANY
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/led.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "sun4i_can"
+
+/* Registers address (physical base address 0x01C2BC00) */
+#define SUN4I_REG_MSEL_ADDR	0x0000	/* CAN Mode Select */
+#define SUN4I_REG_CMD_ADDR	0x0004	/* CAN Command */
+#define SUN4I_REG_STA_ADDR	0x0008	/* CAN Status */
+#define SUN4I_REG_INT_ADDR	0x000c	/* CAN Interrupt Flag */
+#define SUN4I_REG_INTEN_ADDR	0x0010	/* CAN Interrupt Enable */
+#define SUN4I_REG_BTIME_ADDR	0x0014	/* CAN Bus Timing 0 */
+#define SUN4I_REG_TEWL_ADDR	0x0018	/* CAN Tx Error Warning Limit */
+#define SUN4I_REG_ERRC_ADDR	0x001c	/* CAN Error Counter */
+#define SUN4I_REG_RMCNT_ADDR	0x0020	/* CAN Receive Message Counter */
+#define SUN4I_REG_RBUFSA_ADDR	0x0024	/* CAN Receive Buffer Start Address */
+#define SUN4I_REG_BUF0_ADDR	0x0040	/* CAN Tx/Rx Buffer 0 */
+#define SUN4I_REG_BUF1_ADDR	0x0044	/* CAN Tx/Rx Buffer 1 */
+#define SUN4I_REG_BUF2_ADDR	0x0048	/* CAN Tx/Rx Buffer 2 */
+#define SUN4I_REG_BUF3_ADDR	0x004c	/* CAN Tx/Rx Buffer 3 */
+#define SUN4I_REG_BUF4_ADDR	0x0050	/* CAN Tx/Rx Buffer 4 */
+#define SUN4I_REG_BUF5_ADDR	0x0054	/* CAN Tx/Rx Buffer 5 */
+#define SUN4I_REG_BUF6_ADDR	0x0058	/* CAN Tx/Rx Buffer 6 */
+#define SUN4I_REG_BUF7_ADDR	0x005c	/* CAN Tx/Rx Buffer 7 */
+#define SUN4I_REG_BUF8_ADDR	0x0060	/* CAN Tx/Rx Buffer 8 */
+#define SUN4I_REG_BUF9_ADDR	0x0064	/* CAN Tx/Rx Buffer 9 */
+#define SUN4I_REG_BUF10_ADDR	0x0068	/* CAN Tx/Rx Buffer 10 */
+#define SUN4I_REG_BUF11_ADDR	0x006c	/* CAN Tx/Rx Buffer 11 */
+#define SUN4I_REG_BUF12_ADDR	0x0070	/* CAN Tx/Rx Buffer 12 */
+#define SUN4I_REG_ACPC_ADDR	0x0040	/* CAN Acceptance Code 0 */
+#define SUN4I_REG_ACPM_ADDR	0x0044	/* CAN Acceptance Mask 0 */
+#define SUN4I_REG_RBUF_RBACK_START_ADDR	0x0180	/* CAN transmit buffer start */
+#define SUN4I_REG_RBUF_RBACK_END_ADDR	0x01b0	/* CAN transmit buffer end */
+
+/* Controller Register Description */
+
+/* mode select register (r/w)
+ * offset:0x0000 default:0x0000_0001
+ */
+#define SUN4I_MSEL_SLEEP_MODE		(0x01 << 4) /* write in reset mode */
+#define SUN4I_MSEL_WAKE_UP		(0x00 << 4)
+#define SUN4I_MSEL_SINGLE_FILTER	(0x01 << 3) /* write in reset mode */
+#define SUN4I_MSEL_DUAL_FILTERS		(0x00 << 3)
+#define SUN4I_MSEL_LOOPBACK_MODE	BIT(2)
+#define SUN4I_MSEL_LISTEN_ONLY_MODE	BIT(1)
+#define SUN4I_MSEL_RESET_MODE		BIT(0)
+
+/* command register (w)
+ * offset:0x0004 default:0x0000_0000
+ */
+#define SUN4I_CMD_BUS_OFF_REQ	BIT(5)
+#define SUN4I_CMD_SELF_RCV_REQ	BIT(4)
+#define SUN4I_CMD_CLEAR_OR_FLAG	BIT(3)
+#define SUN4I_CMD_RELEASE_RBUF	BIT(2)
+#define SUN4I_CMD_ABORT_REQ	BIT(1)
+#define SUN4I_CMD_TRANS_REQ	BIT(0)
+
+/* status register (r)
+ * offset:0x0008 default:0x0000_003c
+ */
+#define SUN4I_STA_BIT_ERR	(0x00 << 22)
+#define SUN4I_STA_FORM_ERR	(0x01 << 22)
+#define SUN4I_STA_STUFF_ERR	(0x02 << 22)
+#define SUN4I_STA_OTHER_ERR	(0x03 << 22)
+#define SUN4I_STA_MASK_ERR	(0x03 << 22)
+#define SUN4I_STA_ERR_DIR	BIT(21)
+#define SUN4I_STA_ERR_SEG_CODE	(0x1f << 16)
+#define SUN4I_STA_START		(0x03 << 16)
+#define SUN4I_STA_ID28_21	(0x02 << 16)
+#define SUN4I_STA_ID20_18	(0x06 << 16)
+#define SUN4I_STA_SRTR		(0x04 << 16)
+#define SUN4I_STA_IDE		(0x05 << 16)
+#define SUN4I_STA_ID17_13	(0x07 << 16)
+#define SUN4I_STA_ID12_5	(0x0f << 16)
+#define SUN4I_STA_ID4_0		(0x0e << 16)
+#define SUN4I_STA_RTR		(0x0c << 16)
+#define SUN4I_STA_RB1		(0x0d << 16)
+#define SUN4I_STA_RB0		(0x09 << 16)
+#define SUN4I_STA_DLEN		(0x0b << 16)
+#define SUN4I_STA_DATA_FIELD	(0x0a << 16)
+#define SUN4I_STA_CRC_SEQUENCE	(0x08 << 16)
+#define SUN4I_STA_CRC_DELIMITER	(0x18 << 16)
+#define SUN4I_STA_ACK		(0x19 << 16)
+#define SUN4I_STA_ACK_DELIMITER	(0x1b << 16)
+#define SUN4I_STA_END		(0x1a << 16)
+#define SUN4I_STA_INTERMISSION	(0x12 << 16)
+#define SUN4I_STA_ACTIVE_ERROR	(0x11 << 16)
+#define SUN4I_STA_PASSIVE_ERROR	(0x16 << 16)
+#define SUN4I_STA_TOLERATE_DOMINANT_BITS	(0x13 << 16)
+#define SUN4I_STA_ERROR_DELIMITER	(0x17 << 16)
+#define SUN4I_STA_OVERLOAD	(0x1c << 16)
+#define SUN4I_STA_BUS_OFF	BIT(7)
+#define SUN4I_STA_ERR_STA	BIT(6)
+#define SUN4I_STA_TRANS_BUSY	BIT(5)
+#define SUN4I_STA_RCV_BUSY	BIT(4)
+#define SUN4I_STA_TRANS_OVER	BIT(3)
+#define SUN4I_STA_TBUF_RDY	BIT(2)
+#define SUN4I_STA_DATA_ORUN	BIT(1)
+#define SUN4I_STA_RBUF_RDY	BIT(0)
+
+/* interrupt register (r)
+ * offset:0x000c default:0x0000_0000
+ */
+#define SUN4I_INT_BUS_ERR	BIT(7)
+#define SUN4I_INT_ARB_LOST	BIT(6)
+#define SUN4I_INT_ERR_PASSIVE	BIT(5)
+#define SUN4I_INT_WAKEUP	BIT(4)
+#define SUN4I_INT_DATA_OR	BIT(3)
+#define SUN4I_INT_ERR_WRN	BIT(2)
+#define SUN4I_INT_TBUF_VLD	BIT(1)
+#define SUN4I_INT_RBUF_VLD	BIT(0)
+
+/* interrupt enable register (r/w)
+ * offset:0x0010 default:0x0000_0000
+ */
+#define SUN4I_INTEN_BERR	BIT(7)
+#define SUN4I_INTEN_ARB_LOST	BIT(6)
+#define SUN4I_INTEN_ERR_PASSIVE	BIT(5)
+#define SUN4I_INTEN_WAKEUP	BIT(4)
+#define SUN4I_INTEN_OR		BIT(3)
+#define SUN4I_INTEN_ERR_WRN	BIT(2)
+#define SUN4I_INTEN_TX		BIT(1)
+#define SUN4I_INTEN_RX		BIT(0)
+
+/* error code */
+#define SUN4I_ERR_INRCV		(0x1 << 5)
+#define SUN4I_ERR_INTRANS	(0x0 << 5)
+
+/* filter mode */
+#define SUN4I_FILTER_CLOSE	0
+#define SUN4I_SINGLE_FLTER_MODE	1
+#define SUN4I_DUAL_FILTER_MODE	2
+
+/* message buffer flags */
+#define SUN4I_MSG_EFF_FLAG	BIT(7)
+#define SUN4I_MSG_RTR_FLAG	BIT(6)
+
+/* max. number of interrupts handled in ISR */
+#define SUN4I_CAN_MAX_IRQ	20
+#define SUN4I_MODE_MAX_RETRIES	100
+
+struct sun4ican_priv {
+	struct can_priv can;
+	void __iomem *base;
+	struct clk *clk;
+	spinlock_t cmdreg_lock;	/* lock for concurrent cmd register writes */
+};
+
+static const struct can_bittiming_const sun4ican_bittiming_const = {
+	.name = DRV_NAME,
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 64,
+	.brp_inc = 1,
+};
+
+static void sun4i_can_write_cmdreg(struct sun4ican_priv *priv, u8 val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->cmdreg_lock, flags);
+	writel(val, priv->base + SUN4I_REG_CMD_ADDR);
+	spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
+}
+
+static int set_normal_mode(struct net_device *dev)
+{
+	struct sun4ican_priv *priv = netdev_priv(dev);
+	int retry = SUN4I_MODE_MAX_RETRIES;
+	u32 mod_reg_val = 0;
+
+	do {
+		mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
+		mod_reg_val &= ~SUN4I_MSEL_RESET_MODE;
+		writel(mod_reg_val, priv->base + SUN4I_REG_MSEL_ADDR);
+	} while (retry-- && (mod_reg_val & SUN4I_MSEL_RESET_MODE));
+
+	if (readl(priv->base + SUN4I_REG_MSEL_ADDR) & SUN4I_MSEL_RESET_MODE) {
+		netdev_err(dev,
+			   "setting controller into normal mode failed!\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int set_reset_mode(struct net_device *dev)
+{
+	struct sun4ican_priv *priv = netdev_priv(dev);
+	int retry = SUN4I_MODE_MAX_RETRIES;
+	u32 mod_reg_val = 0;
+
+	do {
+		mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
+		mod_reg_val |= SUN4I_MSEL_RESET_MODE;
+		writel(mod_reg_val, priv->base + SUN4I_REG_MSEL_ADDR);
+	} while (retry-- && !(mod_reg_val & SUN4I_MSEL_RESET_MODE));
+
+	if (!(readl(priv->base + SUN4I_REG_MSEL_ADDR) &
+	      SUN4I_MSEL_RESET_MODE)) {
+		netdev_err(dev, "setting controller into reset mode failed!\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/* bittiming is called in reset_mode only */
+static int sun4ican_set_bittiming(struct net_device *dev)
+{
+	struct sun4ican_priv *priv = netdev_priv(dev);
+	struct can_bittiming *bt = &priv->can.bittiming;
+	u32 cfg;
+
+	cfg = ((bt->brp - 1) & 0x3FF) |
+	     (((bt->sjw - 1) & 0x3) << 14) |
+	     (((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) << 16) |
+	     (((bt->phase_seg2 - 1) & 0x7) << 20);
+	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+		cfg |= 0x800000;
+
+	netdev_dbg(dev, "setting BITTIMING=0x%08x\n", cfg);
+	writel(cfg, priv->base + SUN4I_REG_BTIME_ADDR);
+
+	return 0;
+}
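
As a worked example of the BTIME packing above (the timing values are illustrative only, not taken from a datasheet): brp = 6, sjw = 1, prop_seg + phase_seg1 = 13 and phase_seg2 = 2 would give

    cfg = ((6 - 1) & 0x3ff)
        | (((1 - 1) & 0x3) << 14)
        | (((13 - 1) & 0xf) << 16)
        | (((2 - 1) & 0x7) << 20);      /* = 0x001c0005 */
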
+
+static int sun4ican_get_berr_counter(const struct net_device *dev,
+				     struct can_berr_counter *bec)
+{
+	struct sun4ican_priv *priv = netdev_priv(dev);
+	u32 errors;
+	int err;
+
+	err = clk_prepare_enable(priv->clk);
+	if (err) {
+		netdev_err(dev, "could not enable clock\n");
+		return err;
+	}
+
+	errors = readl(priv->base + SUN4I_REG_ERRC_ADDR);
+
+	bec->txerr = errors & 0xFF;
+	bec->rxerr = (errors >> 16) & 0xFF;
+
+	clk_disable_unprepare(priv->clk);
+
+	return 0;
+}
+
+static int sun4i_can_start(struct net_device *dev)
+{
+	struct sun4ican_priv *priv = netdev_priv(dev);
+	int err;
+	u32 mod_reg_val;
+
+	/* we need to enter the reset mode */
+	err = set_reset_mode(dev);
+	if (err) {
+		netdev_err(dev, "could not enter reset mode\n");
+		return err;
+	}
+
+	/* set filters - we accept all */
+	writel(0x00000000, priv->base + SUN4I_REG_ACPC_ADDR);
+	writel(0xFFFFFFFF, priv->base + SUN4I_REG_ACPM_ADDR);
+
+	/* clear error counters and error code capture */
+	writel(0, priv->base + SUN4I_REG_ERRC_ADDR);
+
+	/* enable interrupts */
+	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+		writel(0xFF, priv->base + SUN4I_REG_INTEN_ADDR);
+	else
+		writel(0xFF & ~SUN4I_INTEN_BERR,
+		       priv->base + SUN4I_REG_INTEN_ADDR);
+
+	/* enter the selected mode */
+	mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
+	if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+		mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
+	else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+		mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
+	writel(mod_reg_val, priv->base + SUN4I_REG_MSEL_ADDR);
+
+	err = sun4ican_set_bittiming(dev);
+	if (err)
+		return err;
+
+	/* we are ready to enter the normal mode */
+	err = set_normal_mode(dev);
+	if (err) {
+		netdev_err(dev, "could not enter normal mode\n");
+		return err;
+	}
+
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	return 0;
+}
+
+static int sun4i_can_stop(struct net_device *dev)
+{
+	struct sun4ican_priv *priv = netdev_priv(dev);
+	int err;
+
+	priv->can.state = CAN_STATE_STOPPED;
+	/* we need to enter reset mode */
+	err = set_reset_mode(dev);
+	if (err) {
+		netdev_err(dev, "could not enter reset mode\n");
+		return err;
+	}
+
+	/* disable all interrupts */
+	writel(0, priv->base + SUN4I_REG_INTEN_ADDR);
+
+	return 0;
+}
+
+static int sun4ican_set_mode(struct net_device *dev, enum can_mode mode)
+{
+	int err;
+
+	switch (mode) {
+	case CAN_MODE_START:
+		err = sun4i_can_start(dev);
+		if (err) {
+			netdev_err(dev, "starting CAN controller failed!\n");
+			return err;
+		}
+		if (netif_queue_stopped(dev))
+			netif_wake_queue(dev);
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/* transmit a CAN message
+ * message layout in the sk_buff should be like this:
+ * xx xx xx xx         ff         ll 00 11 22 33 44 55 66 77
+ * [ can_id ] [flags] [len] [can data (up to 8 bytes)]
+ */
+static int sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct sun4ican_priv *priv = netdev_priv(dev);
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	u8 dlc;
+	u32 dreg, msg_flag_n;
+	canid_t id;
+	int i;
+
+	if (can_dropped_invalid_skb(dev, skb))
+		return NETDEV_TX_OK;
+
+	netif_stop_queue(dev);
+
+	id = cf->can_id;
+	dlc = cf->can_dlc;
+	msg_flag_n = dlc;
+
+	if (id & CAN_RTR_FLAG)
+		msg_flag_n |= SUN4I_MSG_RTR_FLAG;
+
+	if (id & CAN_EFF_FLAG) {
+		msg_flag_n |= SUN4I_MSG_EFF_FLAG;
+		dreg = SUN4I_REG_BUF5_ADDR;
+		writel((id >> 21) & 0xFF, priv->base + SUN4I_REG_BUF1_ADDR);
+		writel((id >> 13) & 0xFF, priv->base + SUN4I_REG_BUF2_ADDR);
+		writel((id >> 5)  & 0xFF, priv->base + SUN4I_REG_BUF3_ADDR);
+		writel((id << 3)  & 0xF8, priv->base + SUN4I_REG_BUF4_ADDR);
+	} else {
+		dreg = SUN4I_REG_BUF3_ADDR;
+		writel((id >> 3) & 0xFF, priv->base + SUN4I_REG_BUF1_ADDR);
+		writel((id << 5) & 0xE0, priv->base + SUN4I_REG_BUF2_ADDR);
+	}
+
+	for (i = 0; i < dlc; i++)
+		writel(cf->data[i], priv->base + (dreg + i * 4));
+
+	writel(msg_flag_n, priv->base + SUN4I_REG_BUF0_ADDR);
+
+	can_put_echo_skb(skb, dev, 0);
+
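+	/* in loopback mode let the controller receive its own frame,
+	 * otherwise request a regular transmission
+	 */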
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+		sun4i_can_write_cmdreg(priv, SUN4I_CMD_SELF_RCV_REQ);
+	else
+		sun4i_can_write_cmdreg(priv, SUN4I_CMD_TRANS_REQ);
+
+	return NETDEV_TX_OK;
+}
+
+static void sun4i_can_rx(struct net_device *dev)
+{
+	struct sun4ican_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	u8 fi;
+	u32 dreg;
+	canid_t id;
+	int i;
+
+	/* create zero'ed CAN frame buffer */
+	skb = alloc_can_skb(dev, &cf);
+	if (!skb)
+		return;
+
+	fi = readl(priv->base + SUN4I_REG_BUF0_ADDR);
+	cf->can_dlc = get_can_dlc(fi & 0x0F);
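+	/* reassemble the CAN ID from the receive buffer registers,
+	 * mirroring the transmit-side layout
+	 */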
+	if (fi & SUN4I_MSG_EFF_FLAG) {
+		dreg = SUN4I_REG_BUF5_ADDR;
+		id = (readl(priv->base + SUN4I_REG_BUF1_ADDR) << 21) |
+		     (readl(priv->base + SUN4I_REG_BUF2_ADDR) << 13) |
+		     (readl(priv->base + SUN4I_REG_BUF3_ADDR) << 5)  |
+		    ((readl(priv->base + SUN4I_REG_BUF4_ADDR) >> 3)  & 0x1f);
+		id |= CAN_EFF_FLAG;
+	} else {
+		dreg = SUN4I_REG_BUF3_ADDR;
+		id = (readl(priv->base + SUN4I_REG_BUF1_ADDR) << 3) |
+		    ((readl(priv->base + SUN4I_REG_BUF2_ADDR) >> 5) & 0x7);
+	}
+
+	/* remote frame ? */
+	if (fi & SUN4I_MSG_RTR_FLAG)
+		id |= CAN_RTR_FLAG;
+	else
+		for (i = 0; i < cf->can_dlc; i++)
+			cf->data[i] = readl(priv->base + dreg + i * 4);
+
+	cf->can_id = id;
+
+	sun4i_can_write_cmdreg(priv, SUN4I_CMD_RELEASE_RBUF);
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+	netif_rx(skb);
+
+	can_led_event(dev, CAN_LED_EVENT_RX);
+}
+
+static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
+{
+	struct sun4ican_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	enum can_state state = priv->can.state;
+	enum can_state rx_state, tx_state;
+	unsigned int rxerr, txerr, errc;
+	u32 ecc, alc;
+
+	/* we don't skip if alloc fails because we want the stats anyhow */
+	skb = alloc_can_err_skb(dev, &cf);
+
+	errc = readl(priv->base + SUN4I_REG_ERRC_ADDR);
+	rxerr = (errc >> 16) & 0xFF;
+	txerr = errc & 0xFF;
+
+	if (skb) {
+		cf->data[6] = txerr;
+		cf->data[7] = rxerr;
+	}
+
+	if (isrc & SUN4I_INT_DATA_OR) {
+		/* data overrun interrupt */
+		netdev_dbg(dev, "data overrun interrupt\n");
+		if (likely(skb)) {
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+		}
+		stats->rx_over_errors++;
+		stats->rx_errors++;
+		/* clear bit */
+		sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
+	}
+	if (isrc & SUN4I_INT_ERR_WRN) {
+		/* error warning interrupt */
+		netdev_dbg(dev, "error warning interrupt\n");
+
+		if (status & SUN4I_STA_BUS_OFF)
+			state = CAN_STATE_BUS_OFF;
+		else if (status & SUN4I_STA_ERR_STA)
+			state = CAN_STATE_ERROR_WARNING;
+		else
+			state = CAN_STATE_ERROR_ACTIVE;
+	}
+	if (isrc & SUN4I_INT_BUS_ERR) {
+		/* bus error interrupt */
+		netdev_dbg(dev, "bus error interrupt\n");
+		priv->can.can_stats.bus_error++;
+		stats->rx_errors++;
+
+		if (likely(skb)) {
+			ecc = readl(priv->base + SUN4I_REG_STA_ADDR);
+
+			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+			switch (ecc & SUN4I_STA_MASK_ERR) {
+			case SUN4I_STA_BIT_ERR:
+				cf->data[2] |= CAN_ERR_PROT_BIT;
+				break;
+			case SUN4I_STA_FORM_ERR:
+				cf->data[2] |= CAN_ERR_PROT_FORM;
+				break;
+			case SUN4I_STA_STUFF_ERR:
+				cf->data[2] |= CAN_ERR_PROT_STUFF;
+				break;
+			default:
+				cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+				cf->data[3] = (ecc & SUN4I_STA_ERR_SEG_CODE)
+					       >> 16;
+				break;
+			}
+			/* error occurred during transmission? */
+			if ((ecc & SUN4I_STA_ERR_DIR) == 0)
+				cf->data[2] |= CAN_ERR_PROT_TX;
+		}
+	}
+	if (isrc & SUN4I_INT_ERR_PASSIVE) {
+		/* error passive interrupt */
+		netdev_dbg(dev, "error passive interrupt\n");
+		if (state == CAN_STATE_ERROR_PASSIVE)
+			state = CAN_STATE_ERROR_WARNING;
+		else
+			state = CAN_STATE_ERROR_PASSIVE;
+	}
+	if (isrc & SUN4I_INT_ARB_LOST) {
+		/* arbitration lost interrupt */
+		netdev_dbg(dev, "arbitration lost interrupt\n");
+		alc = readl(priv->base + SUN4I_REG_STA_ADDR);
+		priv->can.can_stats.arbitration_lost++;
+		stats->tx_errors++;
+		if (likely(skb)) {
+			cf->can_id |= CAN_ERR_LOSTARB;
+			cf->data[0] = (alc >> 8) & 0x1f;
+		}
+	}
+
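+	/* attribute the new state to the TX and/or RX side depending on
+	 * which error counter is higher; can_change_state() fills in the
+	 * error frame and updates the CAN state machine
+	 */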
+	if (state != priv->can.state) {
+		tx_state = txerr >= rxerr ? state : 0;
+		rx_state = txerr <= rxerr ? state : 0;
+
+		if (likely(skb))
+			can_change_state(dev, cf, tx_state, rx_state);
+		else
+			priv->can.state = state;
+		if (state == CAN_STATE_BUS_OFF)
+			can_bus_off(dev);
+	}
+
+	if (likely(skb)) {
+		stats->rx_packets++;
+		stats->rx_bytes += cf->can_dlc;
+		netif_rx(skb);
+	} else {
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct sun4ican_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	u8 isrc, status;
+	int n = 0;
+
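+	/* handle all pending interrupt sources, bounded by
+	 * SUN4I_CAN_MAX_IRQ iterations so we do not stay in the
+	 * interrupt handler indefinitely
+	 */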
+	while ((isrc = readl(priv->base + SUN4I_REG_INT_ADDR)) &&
+	       (n < SUN4I_CAN_MAX_IRQ)) {
+		n++;
+		status = readl(priv->base + SUN4I_REG_STA_ADDR);
+
+		if (isrc & SUN4I_INT_WAKEUP)
+			netdev_warn(dev, "wakeup interrupt\n");
+
+		if (isrc & SUN4I_INT_TBUF_VLD) {
+			/* transmission complete interrupt */
+			stats->tx_bytes +=
+			    readl(priv->base +
+				  SUN4I_REG_RBUF_RBACK_START_ADDR) & 0xf;
+			stats->tx_packets++;
+			can_get_echo_skb(dev, 0);
+			netif_wake_queue(dev);
+			can_led_event(dev, CAN_LED_EVENT_TX);
+		}
+		if (isrc & SUN4I_INT_RBUF_VLD) {
+			/* receive interrupt */
+			while (status & SUN4I_STA_RBUF_RDY) {
+				/* RX buffer is not empty */
+				sun4i_can_rx(dev);
+				status = readl(priv->base + SUN4I_REG_STA_ADDR);
+			}
+		}
+		if (isrc &
+		    (SUN4I_INT_DATA_OR | SUN4I_INT_ERR_WRN | SUN4I_INT_BUS_ERR |
+		     SUN4I_INT_ERR_PASSIVE | SUN4I_INT_ARB_LOST)) {
+			/* error interrupt */
+			if (sun4i_can_err(dev, isrc, status))
+				netdev_err(dev, "can't allocate buffer - clearing pending interrupts\n");
+		}
+		/* clear interrupts */
+		writel(isrc, priv->base + SUN4I_REG_INT_ADDR);
+		readl(priv->base + SUN4I_REG_INT_ADDR);
+	}
+	if (n >= SUN4I_CAN_MAX_IRQ)
+		netdev_dbg(dev, "%d messages handled in ISR\n", n);
+
+	return (n) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int sun4ican_open(struct net_device *dev)
+{
+	struct sun4ican_priv *priv = netdev_priv(dev);
+	int err;
+
+	/* common open */
+	err = open_candev(dev);
+	if (err)
+		return err;
+
+	/* register interrupt handler */
+	err = request_irq(dev->irq, sun4i_can_interrupt, 0, dev->name, dev);
+	if (err) {
+		netdev_err(dev, "request_irq err: %d\n", err);
+		goto exit_irq;
+	}
+
+	/* turn on clocking for CAN peripheral block */
+	err = clk_prepare_enable(priv->clk);
+	if (err) {
+		netdev_err(dev, "could not enable CAN peripheral clock\n");
+		goto exit_clock;
+	}
+
+	err = sun4i_can_start(dev);
+	if (err) {
+		netdev_err(dev, "could not start CAN peripheral\n");
+		goto exit_can_start;
+	}
+
+	can_led_event(dev, CAN_LED_EVENT_OPEN);
+	netif_start_queue(dev);
+
+	return 0;
+
+exit_can_start:
+	clk_disable_unprepare(priv->clk);
+exit_clock:
+	free_irq(dev->irq, dev);
+exit_irq:
+	close_candev(dev);
+	return err;
+}
+
+static int sun4ican_close(struct net_device *dev)
+{
+	struct sun4ican_priv *priv = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	sun4i_can_stop(dev);
+	clk_disable_unprepare(priv->clk);
+
+	free_irq(dev->irq, dev);
+	close_candev(dev);
+	can_led_event(dev, CAN_LED_EVENT_STOP);
+
+	return 0;
+}
+
+static const struct net_device_ops sun4ican_netdev_ops = {
+	.ndo_open = sun4ican_open,
+	.ndo_stop = sun4ican_close,
+	.ndo_start_xmit = sun4ican_start_xmit,
+};
+
+static const struct of_device_id sun4ican_of_match[] = {
+	{.compatible = "allwinner,sun4i-a10-can"},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, sun4ican_of_match);
+
+static int sun4ican_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+
+	unregister_netdev(dev);
+	free_candev(dev);
+
+	return 0;
+}
+
+static int sun4ican_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *mem;
+	struct clk *clk;
+	void __iomem *addr;
+	int err, irq;
+	struct net_device *dev;
+	struct sun4ican_priv *priv;
+
+	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "unable to request clock\n");
+		err = -ENODEV;
+		goto exit;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "could not get a valid irq\n");
+		err = -ENODEV;
+		goto exit;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	addr = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(addr)) {
+		err = -EBUSY;
+		goto exit;
+	}
+
+	dev = alloc_candev(sizeof(struct sun4ican_priv), 1);
+	if (!dev) {
+		dev_err(&pdev->dev,
+			"could not allocate memory for CAN device\n");
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	dev->netdev_ops = &sun4ican_netdev_ops;
+	dev->irq = irq;
+	dev->flags |= IFF_ECHO;
+
+	priv = netdev_priv(dev);
+	priv->can.clock.freq = clk_get_rate(clk);
+	priv->can.bittiming_const = &sun4ican_bittiming_const;
+	priv->can.do_set_mode = sun4ican_set_mode;
+	priv->can.do_get_berr_counter = sun4ican_get_berr_counter;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
+				       CAN_CTRLMODE_LISTENONLY |
+				       CAN_CTRLMODE_LOOPBACK |
+				       CAN_CTRLMODE_PRESUME_ACK |
+				       CAN_CTRLMODE_3_SAMPLES;
+	priv->base = addr;
+	priv->clk = clk;
+	spin_lock_init(&priv->cmdreg_lock);
+
+	platform_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	err = register_candev(dev);
+	if (err) {
+		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+			DRV_NAME, err);
+		goto exit_free;
+	}
+	devm_can_led_init(dev);
+
+	dev_info(&pdev->dev, "device registered (base=%p, irq=%d)\n",
+		 priv->base, dev->irq);
+
+	return 0;
+
+exit_free:
+	free_candev(dev);
+exit:
+	return err;
+}
+
+static struct platform_driver sun4i_can_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = sun4ican_of_match,
+	},
+	.probe = sun4ican_probe,
+	.remove = sun4ican_remove,
+};
+
+module_platform_driver(sun4i_can_driver);
+
+MODULE_AUTHOR("Peter Chen <xingkongcp@gmail.com>");
+MODULE_AUTHOR("Gerhard Bertelsmann <info@gerhard-bertelsmann.de>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION(DRV_NAME " CAN driver for Allwinner SoCs (A10/A20)");
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index 3de2a6d..4bcfd68 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -125,7 +125,6 @@
 	.set_addr		= mv88e6xxx_set_addr_indirect,
 	.phy_read		= mv88e6xxx_phy_read,
 	.phy_write		= mv88e6xxx_phy_write,
-	.poll_link		= mv88e6xxx_poll_link,
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 3e83865..c73121c 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -178,7 +178,6 @@
 	.set_addr		= mv88e6xxx_set_addr_direct,
 	.phy_read		= mv88e6131_phy_read,
 	.phy_write		= mv88e6131_phy_write,
-	.poll_link		= mv88e6xxx_poll_link,
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index c2daaf0..ca3330a 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -104,7 +104,6 @@
 	.set_addr		= mv88e6xxx_set_addr_indirect,
 	.phy_read		= mv88e6xxx_phy_read_indirect,
 	.phy_write		= mv88e6xxx_phy_write_indirect,
-	.poll_link		= mv88e6xxx_poll_link,
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
@@ -122,6 +121,7 @@
 	.port_vlan_add		= mv88e6xxx_port_vlan_add,
 	.port_vlan_del		= mv88e6xxx_port_vlan_del,
 	.vlan_getnext		= mv88e6xxx_vlan_getnext,
+	.port_fdb_prepare	= mv88e6xxx_port_fdb_prepare,
 	.port_fdb_add		= mv88e6xxx_port_fdb_add,
 	.port_fdb_del		= mv88e6xxx_port_fdb_del,
 	.port_fdb_getnext	= mv88e6xxx_port_fdb_getnext,
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 1f5129c..078a358 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -324,7 +324,6 @@
 	.set_addr		= mv88e6xxx_set_addr_indirect,
 	.phy_read		= mv88e6xxx_phy_read_indirect,
 	.phy_write		= mv88e6xxx_phy_write_indirect,
-	.poll_link		= mv88e6xxx_poll_link,
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
@@ -349,6 +348,7 @@
 	.port_vlan_add		= mv88e6xxx_port_vlan_add,
 	.port_vlan_del		= mv88e6xxx_port_vlan_del,
 	.vlan_getnext		= mv88e6xxx_vlan_getnext,
+	.port_fdb_prepare	= mv88e6xxx_port_fdb_prepare,
 	.port_fdb_add		= mv88e6xxx_port_fdb_add,
 	.port_fdb_del		= mv88e6xxx_port_fdb_del,
 	.port_fdb_getnext	= mv88e6xxx_port_fdb_getnext,
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 30227ca..87b405e 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -23,6 +23,7 @@
 #include <linux/phy.h>
 #include <linux/seq_file.h>
 #include <net/dsa.h>
+#include <net/switchdev.h>
 #include "mv88e6xxx.h"
 
 /* MDIO bus access can be nested in the case of PHYs connected to the
@@ -388,73 +389,6 @@
 }
 #endif
 
-void mv88e6xxx_poll_link(struct dsa_switch *ds)
-{
-	int i;
-
-	for (i = 0; i < DSA_MAX_PORTS; i++) {
-		struct net_device *dev;
-		int uninitialized_var(port_status);
-		int pcs_ctrl;
-		int link;
-		int speed;
-		int duplex;
-		int fc;
-
-		dev = ds->ports[i];
-		if (dev == NULL)
-			continue;
-
-		pcs_ctrl = mv88e6xxx_reg_read(ds, REG_PORT(i), PORT_PCS_CTRL);
-		if (pcs_ctrl < 0 || pcs_ctrl & PORT_PCS_CTRL_FORCE_LINK)
-			continue;
-
-		link = 0;
-		if (dev->flags & IFF_UP) {
-			port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
-							 PORT_STATUS);
-			if (port_status < 0)
-				continue;
-
-			link = !!(port_status & PORT_STATUS_LINK);
-		}
-
-		if (!link) {
-			if (netif_carrier_ok(dev)) {
-				netdev_info(dev, "link down\n");
-				netif_carrier_off(dev);
-			}
-			continue;
-		}
-
-		switch (port_status & PORT_STATUS_SPEED_MASK) {
-		case PORT_STATUS_SPEED_10:
-			speed = 10;
-			break;
-		case PORT_STATUS_SPEED_100:
-			speed = 100;
-			break;
-		case PORT_STATUS_SPEED_1000:
-			speed = 1000;
-			break;
-		default:
-			speed = -1;
-			break;
-		}
-		duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
-		fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;
-
-		if (!netif_carrier_ok(dev)) {
-			netdev_info(dev,
-				    "link up, %d Mb/s, %s duplex, flow control %sabled\n",
-				    speed,
-				    duplex ? "full" : "half",
-				    fc ? "en" : "dis");
-			netif_carrier_on(dev);
-		}
-	}
-}
-
 static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -574,7 +508,8 @@
 			   struct phy_device *phydev)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	u32 ret, reg;
+	u32 reg;
+	int ret;
 
 	if (!phy_is_pseudo_fixed_link(phydev))
 		return;
@@ -1907,30 +1842,41 @@
 	return _mv88e6xxx_atu_load(ds, &entry);
 }
 
-int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid)
+int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
+			       const struct switchdev_obj_port_fdb *fdb,
+			       struct switchdev_trans *trans)
 {
-	int state = is_multicast_ether_addr(addr) ?
+	/* We don't need any dynamic resource from the kernel (yet),
+	 * so skip the prepare phase.
+	 */
+	return 0;
+}
+
+int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+			   const struct switchdev_obj_port_fdb *fdb,
+			   struct switchdev_trans *trans)
+{
+	int state = is_multicast_ether_addr(fdb->addr) ?
 		GLOBAL_ATU_DATA_STATE_MC_STATIC :
 		GLOBAL_ATU_DATA_STATE_UC_STATIC;
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state);
+	ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state);
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
 }
 
 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid)
+			   const struct switchdev_obj_port_fdb *fdb)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid,
+	ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid,
 				       GLOBAL_ATU_DATA_STATE_UNUSED);
 	mutex_unlock(&ps->smi_mutex);
 
@@ -2074,6 +2020,7 @@
 		 */
 		reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
 		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+			reg &= ~PORT_PCS_CTRL_UNFORCED;
 			reg |= PORT_PCS_CTRL_FORCE_LINK |
 				PORT_PCS_CTRL_LINK_UP |
 				PORT_PCS_CTRL_DUPLEX_FULL |
@@ -2124,6 +2071,8 @@
 				reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
 			else
 				reg |= PORT_CONTROL_FRAME_MODE_DSA;
+			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+				PORT_CONTROL_FORWARD_UNKNOWN_MC;
 		}
 
 		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 1cf3cf8..8325c11 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -442,7 +442,6 @@
 int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum);
 int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
 			    int regnum, u16 val);
-void mv88e6xxx_poll_link(struct dsa_switch *ds);
 void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
 void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
 				 uint64_t *data);
@@ -475,10 +474,14 @@
 int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid);
 int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
 			   unsigned long *ports, unsigned long *untagged);
+int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
+			       const struct switchdev_obj_port_fdb *fdb,
+			       struct switchdev_trans *trans);
 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid);
+			   const struct switchdev_obj_port_fdb *fdb,
+			   struct switchdev_trans *trans);
 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid);
+			   const struct switchdev_obj_port_fdb *fdb);
 int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
 			       unsigned char *addr, u16 *vid, bool *is_static);
 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index edf7225..29c3075 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -64,7 +64,7 @@
 	  should say Y to this option if you wish to use it with Linux.
 
 config MAC8390
-	bool "Macintosh NS 8390 based ethernet cards"
+	tristate "Macintosh NS 8390 based ethernet cards"
 	depends on MAC
 	select CRC32
 	---help---
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 65cf60f..b928390 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -454,34 +454,22 @@
 MODULE_DESCRIPTION("Macintosh NS8390-based Nubus Ethernet driver");
 MODULE_LICENSE("GPL");
 
-/* overkill, of course */
-static struct net_device *dev_mac8390[15];
-int init_module(void)
+static struct net_device *dev_mac8390;
+
+int __init init_module(void)
 {
-	int i;
-	for (i = 0; i < 15; i++) {
-		struct net_device *dev = mac8390_probe(-1);
-		if (IS_ERR(dev))
-			break;
-		dev_mac890[i] = dev;
-	}
-	if (!i) {
-		pr_notice("No useable cards found, driver NOT installed.\n");
-		return -ENODEV;
+	dev_mac8390 = mac8390_probe(-1);
+	if (IS_ERR(dev_mac8390)) {
+		pr_warn("mac8390: No card found\n");
+		return PTR_ERR(dev_mac8390);
 	}
 	return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
-	int i;
-	for (i = 0; i < 15; i++) {
-		struct net_device *dev = dev_mac890[i];
-		if (dev) {
-			unregister_netdev(dev);
-			free_netdev(dev);
-		}
-	}
+	unregister_netdev(dev_mac8390);
+	free_netdev(dev_mac8390);
 }
 
 #endif /* MODULE */
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 98a10d5..66d0b73c 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -661,6 +661,7 @@
 	spin_unlock(&lp->devlock);
 	lance_interrupt(dev->irq, dev);
 }
+EXPORT_SYMBOL_GPL(lance_poll);
 #endif
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index afc62ea..0038709 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -100,7 +100,7 @@
 	  DEPCA series.  (This chipset is better known via the NE2100 cards.)
 
 config HPLANCE
-	bool "HP on-board LANCE support"
+	tristate "HP on-board LANCE support"
 	depends on DIO
 	select CRC32
 	---help---
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index a4473d8..4551224 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1940,84 +1940,31 @@
 static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
 						  unsigned int queue_count)
 {
-	unsigned int q_fifo_size = 0;
-	enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+	unsigned int q_fifo_size;
+	unsigned int p_fifo;
 
-	/* Calculate Tx/Rx fifo share per queue */
-	switch (fifo_size) {
-	case 0:
-		q_fifo_size = XGBE_FIFO_SIZE_B(128);
-		break;
-	case 1:
-		q_fifo_size = XGBE_FIFO_SIZE_B(256);
-		break;
-	case 2:
-		q_fifo_size = XGBE_FIFO_SIZE_B(512);
-		break;
-	case 3:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(1);
-		break;
-	case 4:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(2);
-		break;
-	case 5:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(4);
-		break;
-	case 6:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(8);
-		break;
-	case 7:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(16);
-		break;
-	case 8:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(32);
-		break;
-	case 9:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(64);
-		break;
-	case 10:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(128);
-		break;
-	case 11:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(256);
-		break;
-	}
+	/* Calculate the configured fifo size */
+	q_fifo_size = 1 << (fifo_size + 7);
 
-	/* The configured value is not the actual amount of fifo RAM */
+	/* The configured value may not be the actual amount of fifo RAM */
 	q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
 
 	q_fifo_size = q_fifo_size / queue_count;
 
-	/* Set the queue fifo size programmable value */
-	if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_512;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+	/* Each increment in the queue fifo size represents 256 bytes of
+	 * fifo, with 0 representing 256 bytes. Distribute the fifo equally
+	 * between the queues.
+	 */
+	p_fifo = q_fifo_size / 256;
+	if (p_fifo)
+		p_fifo--;
 
 	return p_fifo;
 }
 
 static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
 {
-	enum xgbe_mtl_fifo_size fifo_size;
+	unsigned int fifo_size;
 	unsigned int i;
 
 	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
@@ -2033,7 +1980,7 @@
 
 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
 {
-	enum xgbe_mtl_fifo_size fifo_size;
+	unsigned int fifo_size;
 	unsigned int i;
 
 	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
@@ -2224,7 +2171,7 @@
 
 	default:
 		read_hi = false;
-	};
+	}
 
 	val = XGMAC_IOREAD(pdata, reg_lo);
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index aae9d5e..14bad8c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -360,9 +360,12 @@
 			}
 		}
 
+		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
+			pdata->ext_stats.rx_buffer_unavailable++;
+
 		/* Restart the device on a Fatal Bus Error */
 		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
-			schedule_work(&pdata->restart_work);
+			queue_work(pdata->dev_workqueue, &pdata->restart_work);
 
 		/* Clear all interrupt signals */
 		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
@@ -384,7 +387,8 @@
 				/* Read Tx Timestamp to clear interrupt */
 				pdata->tx_tstamp =
 					hw_if->get_tx_tstamp(pdata);
-				schedule_work(&pdata->tx_tstamp_work);
+				queue_work(pdata->dev_workqueue,
+					   &pdata->tx_tstamp_work);
 			}
 		}
 	}
@@ -450,7 +454,7 @@
 {
 	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
 
-	schedule_work(&pdata->service_work);
+	queue_work(pdata->dev_workqueue, &pdata->service_work);
 
 	mod_timer(&pdata->service_timer, jiffies + HZ);
 }
@@ -891,7 +895,7 @@
 	netif_tx_start_all_queues(netdev);
 
 	xgbe_start_timers(pdata);
-	schedule_work(&pdata->service_work);
+	queue_work(pdata->dev_workqueue, &pdata->service_work);
 
 	DBGPR("<--xgbe_start\n");
 
@@ -1533,7 +1537,7 @@
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
 	netdev_warn(netdev, "tx timeout, device restarting\n");
-	schedule_work(&pdata->restart_work);
+	queue_work(pdata->dev_workqueue, &pdata->restart_work);
 }
 
 static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 59e090e..204fb3a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -179,6 +179,7 @@
 	XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
 	XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
 	XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
+	XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
 };
 
 #define XGBE_STATS_COUNT	ARRAY_SIZE(xgbe_gstring_stats)
@@ -187,8 +188,6 @@
 {
 	int i;
 
-	DBGPR("-->%s\n", __func__);
-
 	switch (stringset) {
 	case ETH_SS_STATS:
 		for (i = 0; i < XGBE_STATS_COUNT; i++) {
@@ -198,8 +197,6 @@
 		}
 		break;
 	}
-
-	DBGPR("<--%s\n", __func__);
 }
 
 static void xgbe_get_ethtool_stats(struct net_device *netdev,
@@ -209,23 +206,17 @@
 	u8 *stat;
 	int i;
 
-	DBGPR("-->%s\n", __func__);
-
 	pdata->hw_if.read_mmc_stats(pdata);
 	for (i = 0; i < XGBE_STATS_COUNT; i++) {
 		stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
 		*data++ = *(u64 *)stat;
 	}
-
-	DBGPR("<--%s\n", __func__);
 }
 
 static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
 {
 	int ret;
 
-	DBGPR("-->%s\n", __func__);
-
 	switch (stringset) {
 	case ETH_SS_STATS:
 		ret = XGBE_STATS_COUNT;
@@ -235,8 +226,6 @@
 		ret = -EOPNOTSUPP;
 	}
 
-	DBGPR("<--%s\n", __func__);
-
 	return ret;
 }
 
@@ -245,13 +234,9 @@
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
-	DBGPR("-->xgbe_get_pauseparam\n");
-
 	pause->autoneg = pdata->phy.pause_autoneg;
 	pause->tx_pause = pdata->phy.tx_pause;
 	pause->rx_pause = pdata->phy.rx_pause;
-
-	DBGPR("<--xgbe_get_pauseparam\n");
 }
 
 static int xgbe_set_pauseparam(struct net_device *netdev,
@@ -260,13 +245,11 @@
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	int ret = 0;
 
-	DBGPR("-->xgbe_set_pauseparam\n");
-
-	DBGPR("  autoneg = %d, tx_pause = %d, rx_pause = %d\n",
-	      pause->autoneg, pause->tx_pause, pause->rx_pause);
-
-	if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE))
+	if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
+		netdev_err(netdev,
+			   "autoneg disabled, pause autoneg not available\n");
 		return -EINVAL;
+	}
 
 	pdata->phy.pause_autoneg = pause->autoneg;
 	pdata->phy.tx_pause = pause->tx_pause;
@@ -286,8 +269,6 @@
 	if (netif_running(netdev))
 		ret = pdata->phy_if.phy_config_aneg(pdata);
 
-	DBGPR("<--xgbe_set_pauseparam\n");
-
 	return ret;
 }
 
@@ -296,8 +277,6 @@
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
-	DBGPR("-->xgbe_get_settings\n");
-
 	cmd->phy_address = pdata->phy.address;
 
 	cmd->supported = pdata->phy.supported;
@@ -311,8 +290,6 @@
 	cmd->port = PORT_NONE;
 	cmd->transceiver = XCVR_INTERNAL;
 
-	DBGPR("<--xgbe_get_settings\n");
-
 	return 0;
 }
 
@@ -323,16 +300,20 @@
 	u32 speed;
 	int ret;
 
-	DBGPR("-->xgbe_set_settings\n");
-
 	speed = ethtool_cmd_speed(cmd);
 
-	if (cmd->phy_address != pdata->phy.address)
+	if (cmd->phy_address != pdata->phy.address) {
+		netdev_err(netdev, "invalid phy address %hhu\n",
+			   cmd->phy_address);
 		return -EINVAL;
+	}
 
 	if ((cmd->autoneg != AUTONEG_ENABLE) &&
-	    (cmd->autoneg != AUTONEG_DISABLE))
+	    (cmd->autoneg != AUTONEG_DISABLE)) {
+		netdev_err(netdev, "unsupported autoneg %hhu\n",
+			   cmd->autoneg);
 		return -EINVAL;
+	}
 
 	if (cmd->autoneg == AUTONEG_DISABLE) {
 		switch (speed) {
@@ -341,16 +322,27 @@
 		case SPEED_1000:
 			break;
 		default:
+			netdev_err(netdev, "unsupported speed %u\n", speed);
 			return -EINVAL;
 		}
 
-		if (cmd->duplex != DUPLEX_FULL)
+		if (cmd->duplex != DUPLEX_FULL) {
+			netdev_err(netdev, "unsupported duplex %hhu\n",
+				   cmd->duplex);
 			return -EINVAL;
+		}
 	}
 
+	netif_dbg(pdata, link, netdev,
+		  "requested advertisement %#x, phy supported %#x\n",
+		  cmd->advertising, pdata->phy.supported);
+
 	cmd->advertising &= pdata->phy.supported;
-	if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
+	if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) {
+		netdev_err(netdev,
+			   "unsupported requested advertisement\n");
 		return -EINVAL;
+	}
 
 	ret = 0;
 	pdata->phy.autoneg = cmd->autoneg;
@@ -366,8 +358,6 @@
 	if (netif_running(netdev))
 		ret = pdata->phy_if.phy_config_aneg(pdata);
 
-	DBGPR("<--xgbe_set_settings\n");
-
 	return ret;
 }
 
@@ -388,13 +378,25 @@
 	drvinfo->n_stats = XGBE_STATS_COUNT;
 }
 
+static u32 xgbe_get_msglevel(struct net_device *netdev)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	return pdata->msg_enable;
+}
+
+static void xgbe_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	pdata->msg_enable = msglevel;
+}
+
 static int xgbe_get_coalesce(struct net_device *netdev,
 			     struct ethtool_coalesce *ec)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
-	DBGPR("-->xgbe_get_coalesce\n");
-
 	memset(ec, 0, sizeof(struct ethtool_coalesce));
 
 	ec->rx_coalesce_usecs = pdata->rx_usecs;
@@ -402,8 +404,6 @@
 
 	ec->tx_max_coalesced_frames = pdata->tx_frames;
 
-	DBGPR("<--xgbe_get_coalesce\n");
-
 	return 0;
 }
 
@@ -415,8 +415,6 @@
 	unsigned int rx_frames, rx_riwt, rx_usecs;
 	unsigned int tx_frames;
 
-	DBGPR("-->xgbe_set_coalesce\n");
-
 	/* Check for not supported parameters  */
 	if ((ec->rx_coalesce_usecs_irq) ||
 	    (ec->rx_max_coalesced_frames_irq) ||
@@ -436,8 +434,10 @@
 	    (ec->rx_max_coalesced_frames_high) ||
 	    (ec->tx_coalesce_usecs_high) ||
 	    (ec->tx_max_coalesced_frames_high) ||
-	    (ec->rate_sample_interval))
+	    (ec->rate_sample_interval)) {
+		netdev_err(netdev, "unsupported coalescing parameter\n");
 		return -EOPNOTSUPP;
+	}
 
 	rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
 	rx_usecs = ec->rx_coalesce_usecs;
@@ -449,13 +449,13 @@
 
 	/* Check the bounds of values for Rx */
 	if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
-		netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
-			     hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
+		netdev_err(netdev, "rx-usec is limited to %d usecs\n",
+			   hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
 		return -EINVAL;
 	}
 	if (rx_frames > pdata->rx_desc_count) {
-		netdev_alert(netdev, "rx-frames is limited to %d frames\n",
-			     pdata->rx_desc_count);
+		netdev_err(netdev, "rx-frames is limited to %d frames\n",
+			   pdata->rx_desc_count);
 		return -EINVAL;
 	}
 
@@ -463,8 +463,8 @@
 
 	/* Check the bounds of values for Tx */
 	if (tx_frames > pdata->tx_desc_count) {
-		netdev_alert(netdev, "tx-frames is limited to %d frames\n",
-			     pdata->tx_desc_count);
+		netdev_err(netdev, "tx-frames is limited to %d frames\n",
+			   pdata->tx_desc_count);
 		return -EINVAL;
 	}
 
@@ -476,8 +476,6 @@
 	pdata->tx_frames = tx_frames;
 	hw_if->config_tx_coalesce(pdata);
 
-	DBGPR("<--xgbe_set_coalesce\n");
-
 	return 0;
 }
 
@@ -539,8 +537,10 @@
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	unsigned int ret;
 
-	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+		netdev_err(netdev, "unsupported hash function\n");
 		return -EOPNOTSUPP;
+	}
 
 	if (indir) {
 		ret = hw_if->set_rss_lookup_table(pdata, indir);
@@ -594,6 +594,8 @@
 	.get_settings = xgbe_get_settings,
 	.set_settings = xgbe_set_settings,
 	.get_drvinfo = xgbe_get_drvinfo,
+	.get_msglevel = xgbe_get_msglevel,
+	.set_msglevel = xgbe_set_msglevel,
 	.get_link = ethtool_op_get_link,
 	.get_coalesce = xgbe_get_coalesce,
 	.set_coalesce = xgbe_set_coalesce,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index e83bd76..7dd8933 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -371,7 +371,7 @@
 	set_bit(XGBE_DOWN, &pdata->dev_state);
 
 	/* Check if we should use ACPI or DT */
-	pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
+	pdata->use_acpi = dev->of_node ? 0 : 1;
 
 	phy_pdev = xgbe_get_phy_pdev(pdata);
 	if (!phy_pdev) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 9088c3a..4460580 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1115,8 +1115,7 @@
 	unsigned int reg, link_aneg;
 
 	if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
-		if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
-			netif_carrier_off(pdata->netdev);
+		netif_carrier_off(pdata->netdev);
 
 		pdata->phy.link = 0;
 		goto adjust_link;
@@ -1142,10 +1141,7 @@
 		if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
 			clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
 
-		if (!test_bit(XGBE_LINK, &pdata->dev_state)) {
-			set_bit(XGBE_LINK, &pdata->dev_state);
-			netif_carrier_on(pdata->netdev);
-		}
+		netif_carrier_on(pdata->netdev);
 	} else {
 		if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
 			xgbe_check_link_timeout(pdata);
@@ -1156,10 +1152,7 @@
 
 		xgbe_phy_status_aneg(pdata);
 
-		if (test_bit(XGBE_LINK, &pdata->dev_state)) {
-			clear_bit(XGBE_LINK, &pdata->dev_state);
-			netif_carrier_off(pdata->netdev);
-		}
+		netif_carrier_off(pdata->netdev);
 	}
 
 adjust_link:
@@ -1179,8 +1172,7 @@
 	devm_free_irq(pdata->dev, pdata->an_irq, pdata);
 
 	pdata->phy.link = 0;
-	if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
-		netif_carrier_off(pdata->netdev);
+	netif_carrier_off(pdata->netdev);
 
 	xgbe_phy_adjust_link(pdata);
 }
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 8c9d01e..e234b99 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -209,8 +209,6 @@
 #define XGMAC_IOCTL_CONTEXT	2
 
 #define XGBE_FIFO_MAX		81920
-#define XGBE_FIFO_SIZE_B(x)	(x)
-#define XGBE_FIFO_SIZE_KB(x)	(x * 1024)
 
 #define XGBE_TC_MIN_QUANTUM	10
 
@@ -461,7 +459,6 @@
 
 enum xgbe_state {
 	XGBE_DOWN,
-	XGBE_LINK,
 	XGBE_LINK_INIT,
 	XGBE_LINK_ERR,
 };
@@ -483,20 +480,6 @@
 	XGMAC_INT_STATE_RESTORE,
 };
 
-enum xgbe_mtl_fifo_size {
-	XGMAC_MTL_FIFO_SIZE_256  = 0x00,
-	XGMAC_MTL_FIFO_SIZE_512  = 0x01,
-	XGMAC_MTL_FIFO_SIZE_1K   = 0x03,
-	XGMAC_MTL_FIFO_SIZE_2K   = 0x07,
-	XGMAC_MTL_FIFO_SIZE_4K   = 0x0f,
-	XGMAC_MTL_FIFO_SIZE_8K   = 0x1f,
-	XGMAC_MTL_FIFO_SIZE_16K  = 0x3f,
-	XGMAC_MTL_FIFO_SIZE_32K  = 0x7f,
-	XGMAC_MTL_FIFO_SIZE_64K  = 0xff,
-	XGMAC_MTL_FIFO_SIZE_128K = 0x1ff,
-	XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
-};
-
 enum xgbe_speed {
 	XGBE_SPEED_1000 = 0,
 	XGBE_SPEED_2500,
@@ -598,6 +581,7 @@
 struct xgbe_ext_stats {
 	u64 tx_tso_packets;
 	u64 rx_split_header_packets;
+	u64 rx_buffer_unavailable;
 };
 
 struct xgbe_hw_if {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 21749f0..652f218 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -690,16 +690,24 @@
 			netdev_dbg(ndev, "No phy-handle found in DT\n");
 			return -ENODEV;
 		}
-		pdata->phy_dev = of_phy_find_device(phy_np);
-	}
 
-	phy_dev = pdata->phy_dev;
+		phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
+					 0, pdata->phy_mode);
+		if (!phy_dev) {
+			netdev_err(ndev, "Could not connect to PHY\n");
+			return -ENODEV;
+		}
 
-	if (!phy_dev ||
-	    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
-			       pdata->phy_mode)) {
-		netdev_err(ndev, "Could not connect to PHY\n");
-		return  -ENODEV;
+		pdata->phy_dev = phy_dev;
+	} else {
+		phy_dev = pdata->phy_dev;
+
+		if (!phy_dev ||
+		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
+				       pdata->phy_mode)) {
+			netdev_err(ndev, "Could not connect to PHY\n");
+			return  -ENODEV;
+		}
 	}
 
 	pdata->phy_speed = SPEED_UNKNOWN;
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
index d19a41b..3107129 100644
--- a/drivers/net/ethernet/apple/Kconfig
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -51,7 +51,7 @@
 	  will be called bmac.
 
 config MACMACE
-	bool "Macintosh (AV) onboard MACE ethernet"
+	tristate "Macintosh (AV) onboard MACE ethernet"
 	depends on MAC
 	select CRC32
 	---help---
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index f9cb99b..ffd1805 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -78,6 +78,7 @@
 	{ .compatible = "snps,arc-emac" },
 	{ /* Sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, emac_arc_dt_ids);
 
 static struct platform_driver emac_arc_driver = {
 	.probe = emac_arc_probe,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index b9a5a97..f1b5364 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2079,6 +2079,7 @@
 	{ .compatible = "brcm,systemport" },
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
 
 static struct platform_driver bcm_sysport_driver = {
 	.probe	= bcm_sysport_probe,
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2b66ef3..8fc3f3c 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -813,6 +813,46 @@
 }
 
 static void
+bnx2_free_stats_blk(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (bp->status_blk) {
+		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
+				  bp->status_blk,
+				  bp->status_blk_mapping);
+		bp->status_blk = NULL;
+		bp->stats_blk = NULL;
+	}
+}
+
+static int
+bnx2_alloc_stats_blk(struct net_device *dev)
+{
+	int status_blk_size;
+	void *status_blk;
+	struct bnx2 *bp = netdev_priv(dev);
+
+	/* Combine status and statistics blocks into one allocation. */
+	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
+	if (bp->flags & BNX2_FLAG_MSIX_CAP)
+		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
+						 BNX2_SBLK_MSIX_ALIGN_SIZE);
+	bp->status_stats_size = status_blk_size +
+				sizeof(struct statistics_block);
+	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+					 &bp->status_blk_mapping, GFP_KERNEL);
+	if (status_blk == NULL)
+		return -ENOMEM;
+
+	bp->status_blk = status_blk;
+	bp->stats_blk = status_blk + status_blk_size;
+	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
+
+	return 0;
+}
+
+static void
 bnx2_free_mem(struct bnx2 *bp)
 {
 	int i;
@@ -829,37 +869,19 @@
 			bp->ctx_blk[i] = NULL;
 		}
 	}
-	if (bnapi->status_blk.msi) {
-		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
-				  bnapi->status_blk.msi,
-				  bp->status_blk_mapping);
+
+	if (bnapi->status_blk.msi)
 		bnapi->status_blk.msi = NULL;
-		bp->stats_blk = NULL;
-	}
 }
 
 static int
 bnx2_alloc_mem(struct bnx2 *bp)
 {
-	int i, status_blk_size, err;
+	int i, err;
 	struct bnx2_napi *bnapi;
-	void *status_blk;
-
-	/* Combine status and statistics blocks into one allocation. */
-	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
-	if (bp->flags & BNX2_FLAG_MSIX_CAP)
-		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
-						 BNX2_SBLK_MSIX_ALIGN_SIZE);
-	bp->status_stats_size = status_blk_size +
-				sizeof(struct statistics_block);
-
-	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
-					 &bp->status_blk_mapping, GFP_KERNEL);
-	if (status_blk == NULL)
-		goto alloc_mem_err;
 
 	bnapi = &bp->bnx2_napi[0];
-	bnapi->status_blk.msi = status_blk;
+	bnapi->status_blk.msi = bp->status_blk;
 	bnapi->hw_tx_cons_ptr =
 		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
 	bnapi->hw_rx_cons_ptr =
@@ -870,7 +892,7 @@
 
 			bnapi = &bp->bnx2_napi[i];
 
-			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
 			bnapi->status_blk.msix = sblk;
 			bnapi->hw_tx_cons_ptr =
 				&sblk->status_tx_quick_consumer_index;
@@ -880,10 +902,6 @@
 		}
 	}
 
-	bp->stats_blk = status_blk + status_blk_size;
-
-	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
-
 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
 		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
 		if (bp->ctx_pages == 0)
@@ -8330,6 +8348,11 @@
 
 	bp->phy_addr = 1;
 
+	/* allocate stats_blk */
+	rc = bnx2_alloc_stats_blk(dev);
+	if (rc)
+		goto err_out_unmap;
+
 	/* Disable WOL support if we are running on a SERDES chip. */
 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
 		bnx2_get_5709_media(bp);
@@ -8453,6 +8476,8 @@
 	pci_disable_device(pdev);
 
 err_out:
+	kfree(bp->temp_stats_blk);
+
 	return rc;
 }
 
@@ -8586,6 +8611,7 @@
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 err_free:
+	bnx2_free_stats_blk(dev);
 	free_netdev(dev);
 	return rc;
 }
@@ -8603,6 +8629,7 @@
 
 	pci_iounmap(bp->pdev, bp->regview);
 
+	bnx2_free_stats_blk(dev);
 	kfree(bp->temp_stats_blk);
 
 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index f92f76c..380234d 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6928,6 +6928,7 @@
 
 	dma_addr_t		status_blk_mapping;
 
+	void *status_blk;
 	struct statistics_block	*stats_blk;
 	struct statistics_block	*temp_stats_blk;
 	dma_addr_t		stats_blk_mapping;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index ba93663..b5e64b0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1946,6 +1946,7 @@
 	u16 vlan_cnt;
 	u16 vlan_credit;
 	u16 vxlan_dst_port;
+	u8 vxlan_dst_port_count;
 	bool accept_any_vlan;
 };
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e3da2bd..f1d62d5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3705,16 +3705,14 @@
 
 void bnx2x_update_mfw_dump(struct bnx2x *bp)
 {
-	struct timeval epoc;
 	u32 drv_ver;
 	u32 valid_dump;
 
 	if (!SHMEM2_HAS(bp, drv_info))
 		return;
 
-	/* Update Driver load time */
-	do_gettimeofday(&epoc);
-	SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec);
+	/* Update Driver load time, possibly broken in y2038 */
+	SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
 
 	drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
 	SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
@@ -10110,12 +10108,18 @@
 	if (!netif_running(bp->dev))
 		return;
 
-	if (bp->vxlan_dst_port || !IS_PF(bp)) {
+	if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) {
+		bp->vxlan_dst_port_count++;
+		return;
+	}
+
+	if (bp->vxlan_dst_port_count || !IS_PF(bp)) {
 		DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
 		return;
 	}
 
 	bp->vxlan_dst_port = port;
+	bp->vxlan_dst_port_count = 1;
 	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
 }
 
@@ -10130,10 +10134,14 @@
 
 static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
 {
-	if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) {
+	if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port ||
+	    !IS_PF(bp)) {
 		DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
 		return;
 	}
+	bp->vxlan_dst_port_count--;
+	if (bp->vxlan_dst_port_count)
+		return;
 
 	if (netif_running(bp->dev)) {
 		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index c9bd7f1..ff702a7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4319,8 +4319,16 @@
 
 	/* RSS keys */
 	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
-		memcpy(&data->rss_key[0], &p->rss_key[0],
-		       sizeof(data->rss_key));
+		u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key);
+		const u8 *src = (const u8 *)p->rss_key;
+		int i;
+
+		/* Apparently, bnx2x reads this array in reverse order
+		 * We need to byte swap rss_key to comply with Toeplitz specs.
+		 */
+		for (i = 0; i < sizeof(data->rss_key); i++)
+			*--dst = *src++;
+
 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
 	}
 
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 1e72722..1a3988f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -608,8 +608,7 @@
 	 * transmitted, or when the ring is emtpy.
 	 */
 	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
-	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_high ||
-	    ec->tx_coalesce_usecs_low)
+	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
 		return -EOPNOTSUPP;
 
 	/* Program all TX queues with the same values, as there is no
@@ -3305,6 +3304,7 @@
 	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, bcmgenet_match);
 
 static int bcmgenet_probe(struct platform_device *pdev)
 {
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b7a0f78..9e59663 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1543,7 +1543,7 @@
 }
 
 /* Flush FLI data fifo. */
-static u32
+static int
 bfa_flash_fifo_flush(void __iomem *pci_bar)
 {
 	u32 i;
@@ -1573,11 +1573,11 @@
 }
 
 /* Read flash status. */
-static u32
+static int
 bfa_flash_status_read(void __iomem *pci_bar)
 {
 	union bfa_flash_dev_status_reg	dev_status;
-	u32				status;
+	int				status;
 	u32			ret_status;
 	int				i;
 
@@ -1611,11 +1611,11 @@
 }
 
 /* Start flash read operation. */
-static u32
+static int
 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
 		     char *buf)
 {
-	u32 status;
+	int status;
 
 	/* len must be mutiple of 4 and not exceeding fifo size */
 	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
@@ -1703,7 +1703,8 @@
 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
 		   u32 len)
 {
-	u32 n, status;
+	u32 n;
+	int status;
 	u32 off, l, s, residue, fifo_sz;
 
 	residue = len;
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 5d0753c..04b0d16 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -2400,6 +2400,7 @@
 		q0->rcb->id = 0;
 		q0->rx_packets = q0->rx_bytes = 0;
 		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
+		q0->rxbuf_map_failed = 0;
 
 		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
 			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
@@ -2428,6 +2429,7 @@
 					: rx_cfg->q1_buf_size;
 			q1->rx_packets = q1->rx_bytes = 0;
 			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
+			q1->rxbuf_map_failed = 0;
 
 			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
 				&hqpt_mem[i], &hsqpt_mem[i],
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index e0e797f..c438d03 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -587,6 +587,7 @@
 	u64		rx_bytes;
 	u64		rx_packets_with_error;
 	u64		rxbuf_alloc_failed;
+	u64		rxbuf_map_failed;
 };
 
 /* RxQ pair */
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 506047c..21a0cfc 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -399,7 +399,13 @@
 		}
 
 		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
-				unmap_q->map_size, DMA_FROM_DEVICE);
+					unmap_q->map_size, DMA_FROM_DEVICE);
+		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+			put_page(page);
+			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
+			rcb->rxq->rxbuf_map_failed++;
+			goto finishing;
+		}
 
 		unmap->page = page;
 		unmap->page_offset = page_offset;
@@ -454,8 +460,15 @@
 			rcb->rxq->rxbuf_alloc_failed++;
 			goto finishing;
 		}
+
 		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
 					  buff_sz, DMA_FROM_DEVICE);
+		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+			dev_kfree_skb_any(skb);
+			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
+			rcb->rxq->rxbuf_map_failed++;
+			goto finishing;
+		}
 
 		unmap->skb = skb;
 		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
@@ -3025,6 +3038,11 @@
 	unmap = head_unmap;
 	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
 				  len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+		dev_kfree_skb_any(skb);
+		BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
+		return NETDEV_TX_OK;
+	}
 	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
 	txqent->vector[0].length = htons(len);
 	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
@@ -3056,6 +3074,15 @@
 
 		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
 					    0, size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+			/* Undo the changes starting at tcb->producer_index */
+			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
+					   tcb->producer_index);
+			dev_kfree_skb_any(skb);
+			BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
+			return NETDEV_TX_OK;
+		}
+
 		dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
 		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
 		txqent->vector[vect_id].length = htons(size);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index faedbf2..f4ed816 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -175,6 +175,7 @@
 	u64		tx_skb_headlen_zero;
 	u64		tx_skb_frag_zero;
 	u64		tx_skb_len_mismatch;
+	u64		tx_skb_map_failed;
 
 	u64		hw_stats_updates;
 	u64		netif_rx_dropped;
@@ -189,6 +190,7 @@
 	u64		rx_unmap_q_alloc_failed;
 
 	u64		rxbuf_alloc_failed;
+	u64		rxbuf_map_failed;
 };
 
 /* Complete driver stats */
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 2bdfc5d..0e4fdc3 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -90,6 +90,7 @@
 	"tx_skb_headlen_zero",
 	"tx_skb_frag_zero",
 	"tx_skb_len_mismatch",
+	"tx_skb_map_failed",
 	"hw_stats_updates",
 	"netif_rx_dropped",
 
@@ -102,6 +103,7 @@
 	"tx_unmap_q_alloc_failed",
 	"rx_unmap_q_alloc_failed",
 	"rxbuf_alloc_failed",
+	"rxbuf_map_failed",
 
 	"mac_stats_clr_cnt",
 	"mac_frame_64",
@@ -807,6 +809,7 @@
 							rx_packets_with_error;
 					buf[bi++] = rcb->rxq->
 							rxbuf_alloc_failed;
+					buf[bi++] = rcb->rxq->rxbuf_map_failed;
 					buf[bi++] = rcb->producer_index;
 					buf[bi++] = rcb->consumer_index;
 				}
@@ -821,6 +824,7 @@
 							rx_packets_with_error;
 					buf[bi++] = rcb->rxq->
 							rxbuf_alloc_failed;
+					buf[bi++] = rcb->rxq->rxbuf_map_failed;
 					buf[bi++] = rcb->producer_index;
 					buf[bi++] = rcb->consumer_index;
 				}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index fa0c7b5..634e50c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -47,6 +47,7 @@
 #include <linux/timer.h>
 #include <linux/vmalloc.h>
 #include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
 #include <asm/io.h>
 #include "cxgb4_uld.h"
 
@@ -478,6 +479,8 @@
 #ifdef CONFIG_CHELSIO_T4_FCOE
 	struct cxgb_fcoe fcoe;
 #endif /* CONFIG_CHELSIO_T4_FCOE */
+	bool rxtstamp;  /* Enable TS */
+	struct hwtstamp_config tstamp_config;
 };
 
 struct dentry;
@@ -517,6 +520,7 @@
 
 /* A packet gather list */
 struct pkt_gl {
+	u64 sgetstamp;		    /* SGE Time Stamp for Ingress Packet */
 	struct page_frag frags[MAX_SKB_FRAGS];
 	void *va;                         /* virtual address of first byte */
 	unsigned int nfrags;              /* # of fragments */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0a87a32..4269944 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -940,6 +940,7 @@
 
 static const char * const devlog_facility_strings[] = {
 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
+	[FW_DEVLOG_FACILITY_CF]         = "CF",
 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
 	[FW_DEVLOG_FACILITY_RES]	= "RES",
@@ -1128,18 +1129,26 @@
 static int mbox_show(struct seq_file *seq, void *v)
 {
 	static const char * const owner[] = { "none", "FW", "driver",
-					      "unknown" };
+					      "unknown", "<unread>" };
 
 	int i;
 	unsigned int mbox = (uintptr_t)seq->private & 7;
 	struct adapter *adap = seq->private - mbox;
 	void __iomem *addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
-	unsigned int ctrl_reg = (is_t4(adap->params.chip)
-				 ? CIM_PF_MAILBOX_CTRL_A
-				 : CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A);
-	void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);
 
-	i = MBOWNER_G(readl(ctrl));
+	/* For T4 we don't have a shadow copy of the Mailbox Control register.
+	 * And since reading that real register causes a side effect of
+	 * granting ownership, we're best off simply not reading it at all.
+	 */
+	if (is_t4(adap->params.chip)) {
+		i = 4; /* index of "<unread>" */
+	} else {
+		unsigned int ctrl_reg = CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A;
+		void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);
+
+		i = MBOWNER_G(readl(ctrl));
+	}
+
 	seq_printf(seq, "mailbox owned by %s\n\n", owner[i]);
 
 	for (i = 0; i < MBOX_LEN; i += 8)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 5eedb98..02e4e02 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -961,6 +961,20 @@
 	return ret;
 }
 
+static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
+{
+	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+				   SOF_TIMESTAMPING_RX_SOFTWARE |
+				   SOF_TIMESTAMPING_SOFTWARE;
+
+	ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
+				    SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	ts_info->phc_index = -1;
+
+	return 0;
+}
+
 static u32 get_rss_table_size(struct net_device *dev)
 {
 	const struct port_info *pi = netdev_priv(dev);
@@ -1095,6 +1109,7 @@
 	.get_rxfh	   = get_rss_table,
 	.set_rxfh	   = set_rss_table,
 	.flash_device      = set_flash,
+	.get_ts_info       = get_ts_info
 };
 
 void cxgb4_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index f5dcde2..c29227e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -275,7 +275,7 @@
 	else {
 		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
 
-		const char *s = "10Mbps";
+		const char *s;
 		const struct port_info *p = netdev_priv(dev);
 
 		switch (p->link_cfg.speed) {
@@ -291,6 +291,10 @@
 		case 40000:
 			s = "40Gbps";
 			break;
+		default:
+			pr_info("%s: unsupported speed: %d\n",
+				dev->name, p->link_cfg.speed);
+			return;
 		}
 
 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
@@ -2959,6 +2963,30 @@
 			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
 					 data->reg_num, data->val_in);
 		break;
+	case SIOCGHWTSTAMP:
+		return copy_to_user(req->ifr_data, &pi->tstamp_config,
+				    sizeof(pi->tstamp_config)) ?
+			-EFAULT : 0;
+	case SIOCSHWTSTAMP:
+		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
+				   sizeof(pi->tstamp_config)))
+			return -EFAULT;
+
+		switch (pi->tstamp_config.rx_filter) {
+		case HWTSTAMP_FILTER_NONE:
+			pi->rxtstamp = false;
+			break;
+		case HWTSTAMP_FILTER_ALL:
+			pi->rxtstamp = true;
+			break;
+		default:
+			pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+			return -ERANGE;
+		}
+
+		return copy_to_user(req->ifr_data, &pi->tstamp_config,
+				    sizeof(pi->tstamp_config)) ?
+			-EFAULT : 0;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -3670,7 +3698,7 @@
 	t4_get_tp_version(adap, &adap->params.tp_vers);
 	ret = t4_check_fw_version(adap);
 	/* If firmware is too old (not supported by driver) force an update. */
-	if (ret == -EFAULT)
+	if (ret)
 		state = DEV_STATE_UNINIT;
 	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
 		struct fw_info *fw_info;
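
The SIOCSHWTSTAMP/SIOCGHWTSTAMP handling added above follows the standard
hwtstamp_config ioctl contract from linux/net_tstamp.h, so it can be exercised
from user space in the usual way. A minimal sketch (the interface name "eth0"
is only a placeholder) might look like:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_OFF;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* driver maps this to rxtstamp = true */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder interface name */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("rx_filter accepted as %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}

As in the switch statement above, HWTSTAMP_FILTER_ALL enables RX timestamping,
HWTSTAMP_FILTER_NONE disables it, and any other filter is rejected with -ERANGE.
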
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 9162746..b7b93e7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1820,11 +1820,34 @@
 	return 0;
 }
 
+/**
+ * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
+ * @adap: the adapter
+ * @hwtstamps: time stamp structure to update
+ * @sgetstamp: 60-bit IQE timestamp
+ *
+ * Every ingress queue entry carries a 60-bit timestamp in Core Clock ticks;
+ * convert it to ktime_t and assign it to the hardware timestamp structure.
+ **/
+static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
+				     struct skb_shared_hwtstamps *hwtstamps,
+				     u64 sgetstamp)
+{
+	u64 ns;
+	u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
+
+	ns = div_u64(tmp, adap->params.vpd.cclk);
+
+	memset(hwtstamps, 0, sizeof(*hwtstamps));
+	hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 		   const struct cpl_rx_pkt *pkt)
 {
 	struct adapter *adapter = rxq->rspq.adap;
 	struct sge *s = &adapter->sge;
+	struct port_info *pi;
 	int ret;
 	struct sk_buff *skb;
 
@@ -1842,6 +1865,10 @@
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	skb_record_rx_queue(skb, rxq->rspq.idx);
 	skb_mark_napi_id(skb, &rxq->rspq.napi);
+	pi = netdev_priv(skb->dev);
+	if (pi->rxtstamp)
+		cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
+					 gl->sgetstamp);
 	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
 			     PKT_HASH_TYPE_L3);
@@ -1877,9 +1904,7 @@
 	struct sge *s = &q->adap->sge;
 	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
 			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
-#ifdef CONFIG_CHELSIO_T4_FCOE
 	struct port_info *pi;
-#endif
 
 	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
 		return handle_trace_pkt(q->adap, si);
@@ -1910,6 +1935,10 @@
 
 	rxq->stats.pkts++;
 
+	pi = netdev_priv(skb->dev);
+	if (pi->rxtstamp)
+		cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
+					 si->sgetstamp);
 	if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
 		if (!pkt->ip_frag) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1926,7 +1955,6 @@
 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
 			  RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
 
-		pi = netdev_priv(skb->dev);
 		if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
 			if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
 			    (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
@@ -2067,6 +2095,8 @@
 				unmap_rx_buf(q->adap, &rxq->fl);
 			}
 
+			si.sgetstamp = SGE_TIMESTAMP_G(
+					be64_to_cpu(rc->last_flit));
 			/*
 			 * Last buffer remains mapped so explicitly make it
 			 * coherent for CPU access.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4480625..cf61a58 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -699,50 +699,107 @@
 {
 	static const unsigned int t4_reg_ranges[] = {
 		0x1008, 0x1108,
-		0x1180, 0x11b4,
+		0x1180, 0x1184,
+		0x1190, 0x1194,
+		0x11a0, 0x11a4,
+		0x11b0, 0x11b4,
 		0x11fc, 0x123c,
 		0x1300, 0x173c,
 		0x1800, 0x18fc,
-		0x3000, 0x305c,
-		0x3068, 0x30d8,
-		0x30e0, 0x5924,
-		0x5960, 0x59d4,
-		0x5a00, 0x5af8,
+		0x3000, 0x30d8,
+		0x30e0, 0x30e4,
+		0x30ec, 0x5910,
+		0x5920, 0x5924,
+		0x5960, 0x5960,
+		0x5968, 0x5968,
+		0x5970, 0x5970,
+		0x5978, 0x5978,
+		0x5980, 0x5980,
+		0x5988, 0x5988,
+		0x5990, 0x5990,
+		0x5998, 0x5998,
+		0x59a0, 0x59d4,
+		0x5a00, 0x5ae0,
+		0x5ae8, 0x5ae8,
+		0x5af0, 0x5af0,
+		0x5af8, 0x5af8,
 		0x6000, 0x6098,
 		0x6100, 0x6150,
 		0x6200, 0x6208,
 		0x6240, 0x6248,
-		0x6280, 0x6338,
+		0x6280, 0x62b0,
+		0x62c0, 0x6338,
 		0x6370, 0x638c,
 		0x6400, 0x643c,
 		0x6500, 0x6524,
-		0x6a00, 0x6a38,
-		0x6a60, 0x6a78,
-		0x6b00, 0x6b84,
-		0x6bf0, 0x6c84,
-		0x6cf0, 0x6d84,
-		0x6df0, 0x6e84,
-		0x6ef0, 0x6f84,
-		0x6ff0, 0x7084,
-		0x70f0, 0x7184,
-		0x71f0, 0x7284,
-		0x72f0, 0x7384,
-		0x73f0, 0x7450,
+		0x6a00, 0x6a04,
+		0x6a14, 0x6a38,
+		0x6a60, 0x6a70,
+		0x6a78, 0x6a78,
+		0x6b00, 0x6b0c,
+		0x6b1c, 0x6b84,
+		0x6bf0, 0x6bf8,
+		0x6c00, 0x6c0c,
+		0x6c1c, 0x6c84,
+		0x6cf0, 0x6cf8,
+		0x6d00, 0x6d0c,
+		0x6d1c, 0x6d84,
+		0x6df0, 0x6df8,
+		0x6e00, 0x6e0c,
+		0x6e1c, 0x6e84,
+		0x6ef0, 0x6ef8,
+		0x6f00, 0x6f0c,
+		0x6f1c, 0x6f84,
+		0x6ff0, 0x6ff8,
+		0x7000, 0x700c,
+		0x701c, 0x7084,
+		0x70f0, 0x70f8,
+		0x7100, 0x710c,
+		0x711c, 0x7184,
+		0x71f0, 0x71f8,
+		0x7200, 0x720c,
+		0x721c, 0x7284,
+		0x72f0, 0x72f8,
+		0x7300, 0x730c,
+		0x731c, 0x7384,
+		0x73f0, 0x73f8,
+		0x7400, 0x7450,
 		0x7500, 0x7530,
-		0x7600, 0x761c,
+		0x7600, 0x760c,
+		0x7614, 0x761c,
 		0x7680, 0x76cc,
 		0x7700, 0x7798,
 		0x77c0, 0x77fc,
 		0x7900, 0x79fc,
-		0x7b00, 0x7c38,
-		0x7d00, 0x7efc,
-		0x8dc0, 0x8e1c,
+		0x7b00, 0x7b58,
+		0x7b60, 0x7b84,
+		0x7b8c, 0x7c38,
+		0x7d00, 0x7d38,
+		0x7d40, 0x7d80,
+		0x7d8c, 0x7ddc,
+		0x7de4, 0x7e04,
+		0x7e10, 0x7e1c,
+		0x7e24, 0x7e38,
+		0x7e40, 0x7e44,
+		0x7e4c, 0x7e78,
+		0x7e80, 0x7ea4,
+		0x7eac, 0x7edc,
+		0x7ee8, 0x7efc,
+		0x8dc0, 0x8e04,
+		0x8e10, 0x8e1c,
 		0x8e30, 0x8e78,
-		0x8ea0, 0x8f6c,
-		0x8fc0, 0x9074,
+		0x8ea0, 0x8eb8,
+		0x8ec0, 0x8f6c,
+		0x8fc0, 0x9008,
+		0x9010, 0x9058,
+		0x9060, 0x9060,
+		0x9068, 0x9074,
 		0x90fc, 0x90fc,
-		0x9400, 0x9458,
-		0x9600, 0x96bc,
+		0x9400, 0x9408,
+		0x9410, 0x9458,
+		0x9600, 0x9600,
+		0x9608, 0x9638,
+		0x9640, 0x96bc,
 		0x9800, 0x9808,
 		0x9820, 0x983c,
 		0x9850, 0x9864,
@@ -754,23 +811,42 @@
 		0x9e80, 0x9eec,
 		0x9f00, 0x9f6c,
 		0x9f80, 0x9fec,
-		0xd004, 0xd03c,
+		0xd004, 0xd004,
+		0xd010, 0xd03c,
 		0xdfc0, 0xdfe0,
 		0xe000, 0xea7c,
-		0xf000, 0x11110,
-		0x11118, 0x11190,
+		0xf000, 0x11190,
 		0x19040, 0x1906c,
 		0x19078, 0x19080,
-		0x1908c, 0x19124,
-		0x19150, 0x191b0,
+		0x1908c, 0x190e4,
+		0x190f0, 0x190f8,
+		0x19100, 0x19110,
+		0x19120, 0x19124,
+		0x19150, 0x19194,
+		0x1919c, 0x191b0,
 		0x191d0, 0x191e8,
 		0x19238, 0x1924c,
-		0x193f8, 0x19474,
-		0x19490, 0x194f8,
-		0x19800, 0x19f4c,
-		0x1a000, 0x1a06c,
-		0x1a0b0, 0x1a120,
-		0x1a128, 0x1a138,
+		0x193f8, 0x1943c,
+		0x1944c, 0x19474,
+		0x19490, 0x194e0,
+		0x194f0, 0x194f8,
+		0x19800, 0x19c08,
+		0x19c10, 0x19c90,
+		0x19ca0, 0x19ce4,
+		0x19cf0, 0x19d40,
+		0x19d50, 0x19d94,
+		0x19da0, 0x19de8,
+		0x19df0, 0x19e40,
+		0x19e50, 0x19e90,
+		0x19ea0, 0x19f4c,
+		0x1a000, 0x1a004,
+		0x1a010, 0x1a06c,
+		0x1a0b0, 0x1a0e4,
+		0x1a0ec, 0x1a0f4,
+		0x1a100, 0x1a108,
+		0x1a114, 0x1a120,
+		0x1a128, 0x1a130,
+		0x1a138, 0x1a138,
 		0x1a190, 0x1a1c4,
 		0x1a1fc, 0x1a1fc,
 		0x1e040, 0x1e04c,
@@ -823,9 +899,12 @@
 		0x1ffc0, 0x1ffc8,
 		0x20000, 0x2002c,
 		0x20100, 0x2013c,
-		0x20190, 0x201c8,
+		0x20190, 0x201a0,
+		0x201a8, 0x201b8,
+		0x201c4, 0x201c8,
 		0x20200, 0x20318,
-		0x20400, 0x20528,
+		0x20400, 0x204b4,
+		0x204c0, 0x20528,
 		0x20540, 0x20614,
 		0x21000, 0x21040,
 		0x2104c, 0x21060,
@@ -834,22 +913,62 @@
 		0x21270, 0x21284,
 		0x212fc, 0x21388,
 		0x21400, 0x21404,
-		0x21500, 0x21518,
-		0x2152c, 0x2153c,
+		0x21500, 0x21500,
+		0x21510, 0x21518,
+		0x2152c, 0x21530,
+		0x2153c, 0x2153c,
 		0x21550, 0x21554,
 		0x21600, 0x21600,
-		0x21608, 0x21628,
-		0x21630, 0x2163c,
+		0x21608, 0x2161c,
+		0x21624, 0x21628,
+		0x21630, 0x21634,
+		0x2163c, 0x2163c,
 		0x21700, 0x2171c,
 		0x21780, 0x2178c,
-		0x21800, 0x21c38,
-		0x21c80, 0x21d7c,
+		0x21800, 0x21818,
+		0x21820, 0x21828,
+		0x21830, 0x21848,
+		0x21850, 0x21854,
+		0x21860, 0x21868,
+		0x21870, 0x21870,
+		0x21878, 0x21898,
+		0x218a0, 0x218a8,
+		0x218b0, 0x218c8,
+		0x218d0, 0x218d4,
+		0x218e0, 0x218e8,
+		0x218f0, 0x218f0,
+		0x218f8, 0x21a18,
+		0x21a20, 0x21a28,
+		0x21a30, 0x21a48,
+		0x21a50, 0x21a54,
+		0x21a60, 0x21a68,
+		0x21a70, 0x21a70,
+		0x21a78, 0x21a98,
+		0x21aa0, 0x21aa8,
+		0x21ab0, 0x21ac8,
+		0x21ad0, 0x21ad4,
+		0x21ae0, 0x21ae8,
+		0x21af0, 0x21af0,
+		0x21af8, 0x21c18,
+		0x21c20, 0x21c20,
+		0x21c28, 0x21c30,
+		0x21c38, 0x21c38,
+		0x21c80, 0x21c98,
+		0x21ca0, 0x21ca8,
+		0x21cb0, 0x21cc8,
+		0x21cd0, 0x21cd4,
+		0x21ce0, 0x21ce8,
+		0x21cf0, 0x21cf0,
+		0x21cf8, 0x21d7c,
 		0x21e00, 0x21e04,
 		0x22000, 0x2202c,
 		0x22100, 0x2213c,
-		0x22190, 0x221c8,
+		0x22190, 0x221a0,
+		0x221a8, 0x221b8,
+		0x221c4, 0x221c8,
 		0x22200, 0x22318,
-		0x22400, 0x22528,
+		0x22400, 0x224b4,
+		0x224c0, 0x22528,
 		0x22540, 0x22614,
 		0x23000, 0x23040,
 		0x2304c, 0x23060,
@@ -858,22 +977,62 @@
 		0x23270, 0x23284,
 		0x232fc, 0x23388,
 		0x23400, 0x23404,
-		0x23500, 0x23518,
-		0x2352c, 0x2353c,
+		0x23500, 0x23500,
+		0x23510, 0x23518,
+		0x2352c, 0x23530,
+		0x2353c, 0x2353c,
 		0x23550, 0x23554,
 		0x23600, 0x23600,
-		0x23608, 0x23628,
-		0x23630, 0x2363c,
+		0x23608, 0x2361c,
+		0x23624, 0x23628,
+		0x23630, 0x23634,
+		0x2363c, 0x2363c,
 		0x23700, 0x2371c,
 		0x23780, 0x2378c,
-		0x23800, 0x23c38,
-		0x23c80, 0x23d7c,
+		0x23800, 0x23818,
+		0x23820, 0x23828,
+		0x23830, 0x23848,
+		0x23850, 0x23854,
+		0x23860, 0x23868,
+		0x23870, 0x23870,
+		0x23878, 0x23898,
+		0x238a0, 0x238a8,
+		0x238b0, 0x238c8,
+		0x238d0, 0x238d4,
+		0x238e0, 0x238e8,
+		0x238f0, 0x238f0,
+		0x238f8, 0x23a18,
+		0x23a20, 0x23a28,
+		0x23a30, 0x23a48,
+		0x23a50, 0x23a54,
+		0x23a60, 0x23a68,
+		0x23a70, 0x23a70,
+		0x23a78, 0x23a98,
+		0x23aa0, 0x23aa8,
+		0x23ab0, 0x23ac8,
+		0x23ad0, 0x23ad4,
+		0x23ae0, 0x23ae8,
+		0x23af0, 0x23af0,
+		0x23af8, 0x23c18,
+		0x23c20, 0x23c20,
+		0x23c28, 0x23c30,
+		0x23c38, 0x23c38,
+		0x23c80, 0x23c98,
+		0x23ca0, 0x23ca8,
+		0x23cb0, 0x23cc8,
+		0x23cd0, 0x23cd4,
+		0x23ce0, 0x23ce8,
+		0x23cf0, 0x23cf0,
+		0x23cf8, 0x23d7c,
 		0x23e00, 0x23e04,
 		0x24000, 0x2402c,
 		0x24100, 0x2413c,
-		0x24190, 0x241c8,
+		0x24190, 0x241a0,
+		0x241a8, 0x241b8,
+		0x241c4, 0x241c8,
 		0x24200, 0x24318,
-		0x24400, 0x24528,
+		0x24400, 0x244b4,
+		0x244c0, 0x24528,
 		0x24540, 0x24614,
 		0x25000, 0x25040,
 		0x2504c, 0x25060,
@@ -882,22 +1041,62 @@
 		0x25270, 0x25284,
 		0x252fc, 0x25388,
 		0x25400, 0x25404,
-		0x25500, 0x25518,
-		0x2552c, 0x2553c,
+		0x25500, 0x25500,
+		0x25510, 0x25518,
+		0x2552c, 0x25530,
+		0x2553c, 0x2553c,
 		0x25550, 0x25554,
 		0x25600, 0x25600,
-		0x25608, 0x25628,
-		0x25630, 0x2563c,
+		0x25608, 0x2561c,
+		0x25624, 0x25628,
+		0x25630, 0x25634,
+		0x2563c, 0x2563c,
 		0x25700, 0x2571c,
 		0x25780, 0x2578c,
-		0x25800, 0x25c38,
-		0x25c80, 0x25d7c,
+		0x25800, 0x25818,
+		0x25820, 0x25828,
+		0x25830, 0x25848,
+		0x25850, 0x25854,
+		0x25860, 0x25868,
+		0x25870, 0x25870,
+		0x25878, 0x25898,
+		0x258a0, 0x258a8,
+		0x258b0, 0x258c8,
+		0x258d0, 0x258d4,
+		0x258e0, 0x258e8,
+		0x258f0, 0x258f0,
+		0x258f8, 0x25a18,
+		0x25a20, 0x25a28,
+		0x25a30, 0x25a48,
+		0x25a50, 0x25a54,
+		0x25a60, 0x25a68,
+		0x25a70, 0x25a70,
+		0x25a78, 0x25a98,
+		0x25aa0, 0x25aa8,
+		0x25ab0, 0x25ac8,
+		0x25ad0, 0x25ad4,
+		0x25ae0, 0x25ae8,
+		0x25af0, 0x25af0,
+		0x25af8, 0x25c18,
+		0x25c20, 0x25c20,
+		0x25c28, 0x25c30,
+		0x25c38, 0x25c38,
+		0x25c80, 0x25c98,
+		0x25ca0, 0x25ca8,
+		0x25cb0, 0x25cc8,
+		0x25cd0, 0x25cd4,
+		0x25ce0, 0x25ce8,
+		0x25cf0, 0x25cf0,
+		0x25cf8, 0x25d7c,
 		0x25e00, 0x25e04,
 		0x26000, 0x2602c,
 		0x26100, 0x2613c,
-		0x26190, 0x261c8,
+		0x26190, 0x261a0,
+		0x261a8, 0x261b8,
+		0x261c4, 0x261c8,
 		0x26200, 0x26318,
-		0x26400, 0x26528,
+		0x26400, 0x264b4,
+		0x264c0, 0x26528,
 		0x26540, 0x26614,
 		0x27000, 0x27040,
 		0x2704c, 0x27060,
@@ -906,51 +1105,120 @@
 		0x27270, 0x27284,
 		0x272fc, 0x27388,
 		0x27400, 0x27404,
-		0x27500, 0x27518,
-		0x2752c, 0x2753c,
+		0x27500, 0x27500,
+		0x27510, 0x27518,
+		0x2752c, 0x27530,
+		0x2753c, 0x2753c,
 		0x27550, 0x27554,
 		0x27600, 0x27600,
-		0x27608, 0x27628,
-		0x27630, 0x2763c,
+		0x27608, 0x2761c,
+		0x27624, 0x27628,
+		0x27630, 0x27634,
+		0x2763c, 0x2763c,
 		0x27700, 0x2771c,
 		0x27780, 0x2778c,
-		0x27800, 0x27c38,
-		0x27c80, 0x27d7c,
+		0x27800, 0x27818,
+		0x27820, 0x27828,
+		0x27830, 0x27848,
+		0x27850, 0x27854,
+		0x27860, 0x27868,
+		0x27870, 0x27870,
+		0x27878, 0x27898,
+		0x278a0, 0x278a8,
+		0x278b0, 0x278c8,
+		0x278d0, 0x278d4,
+		0x278e0, 0x278e8,
+		0x278f0, 0x278f0,
+		0x278f8, 0x27a18,
+		0x27a20, 0x27a28,
+		0x27a30, 0x27a48,
+		0x27a50, 0x27a54,
+		0x27a60, 0x27a68,
+		0x27a70, 0x27a70,
+		0x27a78, 0x27a98,
+		0x27aa0, 0x27aa8,
+		0x27ab0, 0x27ac8,
+		0x27ad0, 0x27ad4,
+		0x27ae0, 0x27ae8,
+		0x27af0, 0x27af0,
+		0x27af8, 0x27c18,
+		0x27c20, 0x27c20,
+		0x27c28, 0x27c30,
+		0x27c38, 0x27c38,
+		0x27c80, 0x27c98,
+		0x27ca0, 0x27ca8,
+		0x27cb0, 0x27cc8,
+		0x27cd0, 0x27cd4,
+		0x27ce0, 0x27ce8,
+		0x27cf0, 0x27cf0,
+		0x27cf8, 0x27d7c,
 		0x27e00, 0x27e04,
 	};
 
 	static const unsigned int t5_reg_ranges[] = {
-		0x1008, 0x1148,
-		0x1180, 0x11b4,
+		0x1008, 0x10c0,
+		0x10cc, 0x10f8,
+		0x1100, 0x1100,
+		0x110c, 0x1148,
+		0x1180, 0x1184,
+		0x1190, 0x1194,
+		0x11a0, 0x11a4,
+		0x11b0, 0x11b4,
 		0x11fc, 0x123c,
 		0x1280, 0x173c,
 		0x1800, 0x18fc,
 		0x3000, 0x3028,
-		0x3068, 0x30d8,
+		0x3060, 0x30b0,
+		0x30b8, 0x30d8,
 		0x30e0, 0x30fc,
 		0x3140, 0x357c,
 		0x35a8, 0x35cc,
 		0x35ec, 0x35ec,
 		0x3600, 0x5624,
-		0x56cc, 0x575c,
+		0x56cc, 0x56ec,
+		0x56f4, 0x5720,
+		0x5728, 0x575c,
 		0x580c, 0x5814,
-		0x5890, 0x58bc,
-		0x5940, 0x59dc,
+		0x5890, 0x589c,
+		0x58a4, 0x58ac,
+		0x58b8, 0x58bc,
+		0x5940, 0x59c8,
+		0x59d0, 0x59dc,
 		0x59fc, 0x5a18,
-		0x5a60, 0x5a9c,
+		0x5a60, 0x5a70,
+		0x5a80, 0x5a9c,
 		0x5b94, 0x5bfc,
-		0x6000, 0x6040,
-		0x6058, 0x614c,
+		0x6000, 0x6020,
+		0x6028, 0x6040,
+		0x6058, 0x609c,
+		0x60a8, 0x614c,
 		0x7700, 0x7798,
 		0x77c0, 0x78fc,
-		0x7b00, 0x7c54,
-		0x7d00, 0x7efc,
+		0x7b00, 0x7b58,
+		0x7b60, 0x7b84,
+		0x7b8c, 0x7c54,
+		0x7d00, 0x7d38,
+		0x7d40, 0x7d80,
+		0x7d8c, 0x7ddc,
+		0x7de4, 0x7e04,
+		0x7e10, 0x7e1c,
+		0x7e24, 0x7e38,
+		0x7e40, 0x7e44,
+		0x7e4c, 0x7e78,
+		0x7e80, 0x7edc,
+		0x7ee8, 0x7efc,
 		0x8dc0, 0x8de0,
-		0x8df8, 0x8e84,
+		0x8df8, 0x8e04,
+		0x8e10, 0x8e84,
 		0x8ea0, 0x8f84,
-		0x8fc0, 0x90f8,
-		0x9400, 0x9470,
-		0x9600, 0x96f4,
+		0x8fc0, 0x9058,
+		0x9060, 0x9060,
+		0x9068, 0x90f8,
+		0x9400, 0x9408,
+		0x9410, 0x9470,
+		0x9600, 0x9600,
+		0x9608, 0x9638,
+		0x9640, 0x96f4,
 		0x9800, 0x9808,
 		0x9820, 0x983c,
 		0x9850, 0x9864,
@@ -962,103 +1230,143 @@
 		0x9e80, 0x9eec,
 		0x9f00, 0x9f6c,
 		0x9f80, 0xa020,
-		0xd004, 0xd03c,
+		0xd004, 0xd004,
+		0xd010, 0xd03c,
 		0xdfc0, 0xdfe0,
-		0xe000, 0x11088,
-		0x1109c, 0x11110,
-		0x11118, 0x1117c,
+		0xe000, 0x1106c,
+		0x11074, 0x11088,
+		0x1109c, 0x1117c,
 		0x11190, 0x11204,
 		0x19040, 0x1906c,
 		0x19078, 0x19080,
-		0x1908c, 0x19124,
-		0x19150, 0x191b0,
+		0x1908c, 0x190e8,
+		0x190f0, 0x190f8,
+		0x19100, 0x19110,
+		0x19120, 0x19124,
+		0x19150, 0x19194,
+		0x1919c, 0x191b0,
 		0x191d0, 0x191e8,
 		0x19238, 0x19290,
-		0x193f8, 0x19474,
+		0x193f8, 0x19428,
+		0x19430, 0x19444,
+		0x1944c, 0x1946c,
+		0x19474, 0x19474,
 		0x19490, 0x194cc,
 		0x194f0, 0x194f8,
-		0x19c00, 0x19c60,
-		0x19c94, 0x19e10,
-		0x19e50, 0x19f34,
+		0x19c00, 0x19c08,
+		0x19c10, 0x19c60,
+		0x19c94, 0x19ce4,
+		0x19cf0, 0x19d40,
+		0x19d50, 0x19d94,
+		0x19da0, 0x19de8,
+		0x19df0, 0x19e10,
+		0x19e50, 0x19e90,
+		0x19ea0, 0x19f24,
+		0x19f34, 0x19f34,
 		0x19f40, 0x19f50,
-		0x19f90, 0x19fe4,
-		0x1a000, 0x1a06c,
-		0x1a0b0, 0x1a120,
-		0x1a128, 0x1a138,
+		0x19f90, 0x19fb4,
+		0x19fc4, 0x19fe4,
+		0x1a000, 0x1a004,
+		0x1a010, 0x1a06c,
+		0x1a0b0, 0x1a0e4,
+		0x1a0ec, 0x1a0f8,
+		0x1a100, 0x1a108,
+		0x1a114, 0x1a120,
+		0x1a128, 0x1a130,
+		0x1a138, 0x1a138,
 		0x1a190, 0x1a1c4,
 		0x1a1fc, 0x1a1fc,
 		0x1e008, 0x1e00c,
-		0x1e040, 0x1e04c,
+		0x1e040, 0x1e044,
+		0x1e04c, 0x1e04c,
 		0x1e284, 0x1e290,
 		0x1e2c0, 0x1e2c0,
 		0x1e2e0, 0x1e2e0,
 		0x1e300, 0x1e384,
 		0x1e3c0, 0x1e3c8,
 		0x1e408, 0x1e40c,
-		0x1e440, 0x1e44c,
+		0x1e440, 0x1e444,
+		0x1e44c, 0x1e44c,
 		0x1e684, 0x1e690,
 		0x1e6c0, 0x1e6c0,
 		0x1e6e0, 0x1e6e0,
 		0x1e700, 0x1e784,
 		0x1e7c0, 0x1e7c8,
 		0x1e808, 0x1e80c,
-		0x1e840, 0x1e84c,
+		0x1e840, 0x1e844,
+		0x1e84c, 0x1e84c,
 		0x1ea84, 0x1ea90,
 		0x1eac0, 0x1eac0,
 		0x1eae0, 0x1eae0,
 		0x1eb00, 0x1eb84,
 		0x1ebc0, 0x1ebc8,
 		0x1ec08, 0x1ec0c,
-		0x1ec40, 0x1ec4c,
+		0x1ec40, 0x1ec44,
+		0x1ec4c, 0x1ec4c,
 		0x1ee84, 0x1ee90,
 		0x1eec0, 0x1eec0,
 		0x1eee0, 0x1eee0,
 		0x1ef00, 0x1ef84,
 		0x1efc0, 0x1efc8,
 		0x1f008, 0x1f00c,
-		0x1f040, 0x1f04c,
+		0x1f040, 0x1f044,
+		0x1f04c, 0x1f04c,
 		0x1f284, 0x1f290,
 		0x1f2c0, 0x1f2c0,
 		0x1f2e0, 0x1f2e0,
 		0x1f300, 0x1f384,
 		0x1f3c0, 0x1f3c8,
 		0x1f408, 0x1f40c,
-		0x1f440, 0x1f44c,
+		0x1f440, 0x1f444,
+		0x1f44c, 0x1f44c,
 		0x1f684, 0x1f690,
 		0x1f6c0, 0x1f6c0,
 		0x1f6e0, 0x1f6e0,
 		0x1f700, 0x1f784,
 		0x1f7c0, 0x1f7c8,
 		0x1f808, 0x1f80c,
-		0x1f840, 0x1f84c,
+		0x1f840, 0x1f844,
+		0x1f84c, 0x1f84c,
 		0x1fa84, 0x1fa90,
 		0x1fac0, 0x1fac0,
 		0x1fae0, 0x1fae0,
 		0x1fb00, 0x1fb84,
 		0x1fbc0, 0x1fbc8,
 		0x1fc08, 0x1fc0c,
-		0x1fc40, 0x1fc4c,
+		0x1fc40, 0x1fc44,
+		0x1fc4c, 0x1fc4c,
 		0x1fe84, 0x1fe90,
 		0x1fec0, 0x1fec0,
 		0x1fee0, 0x1fee0,
 		0x1ff00, 0x1ff84,
 		0x1ffc0, 0x1ffc8,
 		0x30000, 0x30030,
+		0x30038, 0x30038,
+		0x30040, 0x30040,
 		0x30100, 0x30144,
-		0x30190, 0x301d0,
+		0x30190, 0x301a0,
+		0x301a8, 0x301b8,
+		0x301c4, 0x301c8,
+		0x301d0, 0x301d0,
 		0x30200, 0x30318,
-		0x30400, 0x3052c,
+		0x30400, 0x304b4,
+		0x304c0, 0x3052c,
 		0x30540, 0x3061c,
-		0x30800, 0x30834,
+		0x30800, 0x30828,
+		0x30834, 0x30834,
 		0x308c0, 0x30908,
 		0x30910, 0x309ac,
-		0x30a00, 0x30a2c,
+		0x30a00, 0x30a14,
+		0x30a1c, 0x30a2c,
 		0x30a44, 0x30a50,
-		0x30a74, 0x30c24,
+		0x30a74, 0x30a74,
+		0x30a7c, 0x30afc,
+		0x30b08, 0x30c24,
 		0x30d00, 0x30d00,
 		0x30d08, 0x30d14,
 		0x30d1c, 0x30d20,
-		0x30d3c, 0x30d50,
+		0x30d3c, 0x30d3c,
+		0x30d48, 0x30d50,
 		0x31200, 0x3120c,
 		0x31220, 0x31220,
 		0x31240, 0x31240,
@@ -1078,27 +1386,65 @@
 		0x322c8, 0x322fc,
 		0x32600, 0x32630,
 		0x32a00, 0x32abc,
-		0x32b00, 0x32b70,
-		0x33000, 0x33048,
-		0x33060, 0x3309c,
-		0x330f0, 0x33148,
-		0x33160, 0x3319c,
-		0x331f0, 0x332e4,
-		0x332f8, 0x333e4,
-		0x333f8, 0x33448,
-		0x33460, 0x3349c,
-		0x334f0, 0x33548,
-		0x33560, 0x3359c,
-		0x335f0, 0x336e4,
-		0x336f8, 0x337e4,
+		0x32b00, 0x32b10,
+		0x32b20, 0x32b30,
+		0x32b40, 0x32b50,
+		0x32b60, 0x32b70,
+		0x33000, 0x33028,
+		0x33030, 0x33048,
+		0x33060, 0x33068,
+		0x33070, 0x3309c,
+		0x330f0, 0x33128,
+		0x33130, 0x33148,
+		0x33160, 0x33168,
+		0x33170, 0x3319c,
+		0x331f0, 0x33238,
+		0x33240, 0x33240,
+		0x33248, 0x33250,
+		0x3325c, 0x33264,
+		0x33270, 0x332b8,
+		0x332c0, 0x332e4,
+		0x332f8, 0x33338,
+		0x33340, 0x33340,
+		0x33348, 0x33350,
+		0x3335c, 0x33364,
+		0x33370, 0x333b8,
+		0x333c0, 0x333e4,
+		0x333f8, 0x33428,
+		0x33430, 0x33448,
+		0x33460, 0x33468,
+		0x33470, 0x3349c,
+		0x334f0, 0x33528,
+		0x33530, 0x33548,
+		0x33560, 0x33568,
+		0x33570, 0x3359c,
+		0x335f0, 0x33638,
+		0x33640, 0x33640,
+		0x33648, 0x33650,
+		0x3365c, 0x33664,
+		0x33670, 0x336b8,
+		0x336c0, 0x336e4,
+		0x336f8, 0x33738,
+		0x33740, 0x33740,
+		0x33748, 0x33750,
+		0x3375c, 0x33764,
+		0x33770, 0x337b8,
+		0x337c0, 0x337e4,
 		0x337f8, 0x337fc,
 		0x33814, 0x33814,
 		0x3382c, 0x3382c,
 		0x33880, 0x3388c,
 		0x338e8, 0x338ec,
-		0x33900, 0x33948,
-		0x33960, 0x3399c,
-		0x339f0, 0x33ae4,
+		0x33900, 0x33928,
+		0x33930, 0x33948,
+		0x33960, 0x33968,
+		0x33970, 0x3399c,
+		0x339f0, 0x33a38,
+		0x33a40, 0x33a40,
+		0x33a48, 0x33a50,
+		0x33a5c, 0x33a64,
+		0x33a70, 0x33ab8,
+		0x33ac0, 0x33ae4,
 		0x33af8, 0x33b10,
 		0x33b28, 0x33b28,
 		0x33b3c, 0x33b50,
@@ -1107,21 +1453,32 @@
 		0x33c3c, 0x33c50,
 		0x33cf0, 0x33cfc,
 		0x34000, 0x34030,
+		0x34038, 0x34038,
+		0x34040, 0x34040,
 		0x34100, 0x34144,
-		0x34190, 0x341d0,
+		0x34190, 0x341a0,
+		0x341a8, 0x341b8,
+		0x341c4, 0x341c8,
+		0x341d0, 0x341d0,
 		0x34200, 0x34318,
-		0x34400, 0x3452c,
+		0x34400, 0x344b4,
+		0x344c0, 0x3452c,
 		0x34540, 0x3461c,
-		0x34800, 0x34834,
+		0x34800, 0x34828,
+		0x34834, 0x34834,
 		0x348c0, 0x34908,
 		0x34910, 0x349ac,
-		0x34a00, 0x34a2c,
+		0x34a00, 0x34a14,
+		0x34a1c, 0x34a2c,
 		0x34a44, 0x34a50,
-		0x34a74, 0x34c24,
+		0x34a74, 0x34a74,
+		0x34a7c, 0x34afc,
+		0x34b08, 0x34c24,
 		0x34d00, 0x34d00,
 		0x34d08, 0x34d14,
 		0x34d1c, 0x34d20,
-		0x34d3c, 0x34d50,
+		0x34d3c, 0x34d3c,
+		0x34d48, 0x34d50,
 		0x35200, 0x3520c,
 		0x35220, 0x35220,
 		0x35240, 0x35240,
@@ -1141,27 +1498,65 @@
 		0x362c8, 0x362fc,
 		0x36600, 0x36630,
 		0x36a00, 0x36abc,
-		0x36b00, 0x36b70,
-		0x37000, 0x37048,
-		0x37060, 0x3709c,
-		0x370f0, 0x37148,
-		0x37160, 0x3719c,
-		0x371f0, 0x372e4,
-		0x372f8, 0x373e4,
-		0x373f8, 0x37448,
-		0x37460, 0x3749c,
-		0x374f0, 0x37548,
-		0x37560, 0x3759c,
-		0x375f0, 0x376e4,
-		0x376f8, 0x377e4,
+		0x36b00, 0x36b10,
+		0x36b20, 0x36b30,
+		0x36b40, 0x36b50,
+		0x36b60, 0x36b70,
+		0x37000, 0x37028,
+		0x37030, 0x37048,
+		0x37060, 0x37068,
+		0x37070, 0x3709c,
+		0x370f0, 0x37128,
+		0x37130, 0x37148,
+		0x37160, 0x37168,
+		0x37170, 0x3719c,
+		0x371f0, 0x37238,
+		0x37240, 0x37240,
+		0x37248, 0x37250,
+		0x3725c, 0x37264,
+		0x37270, 0x372b8,
+		0x372c0, 0x372e4,
+		0x372f8, 0x37338,
+		0x37340, 0x37340,
+		0x37348, 0x37350,
+		0x3735c, 0x37364,
+		0x37370, 0x373b8,
+		0x373c0, 0x373e4,
+		0x373f8, 0x37428,
+		0x37430, 0x37448,
+		0x37460, 0x37468,
+		0x37470, 0x3749c,
+		0x374f0, 0x37528,
+		0x37530, 0x37548,
+		0x37560, 0x37568,
+		0x37570, 0x3759c,
+		0x375f0, 0x37638,
+		0x37640, 0x37640,
+		0x37648, 0x37650,
+		0x3765c, 0x37664,
+		0x37670, 0x376b8,
+		0x376c0, 0x376e4,
+		0x376f8, 0x37738,
+		0x37740, 0x37740,
+		0x37748, 0x37750,
+		0x3775c, 0x37764,
+		0x37770, 0x377b8,
+		0x377c0, 0x377e4,
 		0x377f8, 0x377fc,
 		0x37814, 0x37814,
 		0x3782c, 0x3782c,
 		0x37880, 0x3788c,
 		0x378e8, 0x378ec,
-		0x37900, 0x37948,
-		0x37960, 0x3799c,
-		0x379f0, 0x37ae4,
+		0x37900, 0x37928,
+		0x37930, 0x37948,
+		0x37960, 0x37968,
+		0x37970, 0x3799c,
+		0x379f0, 0x37a38,
+		0x37a40, 0x37a40,
+		0x37a48, 0x37a50,
+		0x37a5c, 0x37a64,
+		0x37a70, 0x37ab8,
+		0x37ac0, 0x37ae4,
 		0x37af8, 0x37b10,
 		0x37b28, 0x37b28,
 		0x37b3c, 0x37b50,
@@ -1170,21 +1565,32 @@
 		0x37c3c, 0x37c50,
 		0x37cf0, 0x37cfc,
 		0x38000, 0x38030,
+		0x38038, 0x38038,
+		0x38040, 0x38040,
 		0x38100, 0x38144,
-		0x38190, 0x381d0,
+		0x38190, 0x381a0,
+		0x381a8, 0x381b8,
+		0x381c4, 0x381c8,
+		0x381d0, 0x381d0,
 		0x38200, 0x38318,
-		0x38400, 0x3852c,
+		0x38400, 0x384b4,
+		0x384c0, 0x3852c,
 		0x38540, 0x3861c,
-		0x38800, 0x38834,
+		0x38800, 0x38828,
+		0x38834, 0x38834,
 		0x388c0, 0x38908,
 		0x38910, 0x389ac,
-		0x38a00, 0x38a2c,
+		0x38a00, 0x38a14,
+		0x38a1c, 0x38a2c,
 		0x38a44, 0x38a50,
-		0x38a74, 0x38c24,
+		0x38a74, 0x38a74,
+		0x38a7c, 0x38afc,
+		0x38b08, 0x38c24,
 		0x38d00, 0x38d00,
 		0x38d08, 0x38d14,
 		0x38d1c, 0x38d20,
-		0x38d3c, 0x38d50,
+		0x38d3c, 0x38d3c,
+		0x38d48, 0x38d50,
 		0x39200, 0x3920c,
 		0x39220, 0x39220,
 		0x39240, 0x39240,
@@ -1204,27 +1610,65 @@
 		0x3a2c8, 0x3a2fc,
 		0x3a600, 0x3a630,
 		0x3aa00, 0x3aabc,
-		0x3ab00, 0x3ab70,
-		0x3b000, 0x3b048,
-		0x3b060, 0x3b09c,
-		0x3b0f0, 0x3b148,
-		0x3b160, 0x3b19c,
-		0x3b1f0, 0x3b2e4,
-		0x3b2f8, 0x3b3e4,
-		0x3b3f8, 0x3b448,
-		0x3b460, 0x3b49c,
-		0x3b4f0, 0x3b548,
-		0x3b560, 0x3b59c,
-		0x3b5f0, 0x3b6e4,
-		0x3b6f8, 0x3b7e4,
+		0x3ab00, 0x3ab10,
+		0x3ab20, 0x3ab30,
+		0x3ab40, 0x3ab50,
+		0x3ab60, 0x3ab70,
+		0x3b000, 0x3b028,
+		0x3b030, 0x3b048,
+		0x3b060, 0x3b068,
+		0x3b070, 0x3b09c,
+		0x3b0f0, 0x3b128,
+		0x3b130, 0x3b148,
+		0x3b160, 0x3b168,
+		0x3b170, 0x3b19c,
+		0x3b1f0, 0x3b238,
+		0x3b240, 0x3b240,
+		0x3b248, 0x3b250,
+		0x3b25c, 0x3b264,
+		0x3b270, 0x3b2b8,
+		0x3b2c0, 0x3b2e4,
+		0x3b2f8, 0x3b338,
+		0x3b340, 0x3b340,
+		0x3b348, 0x3b350,
+		0x3b35c, 0x3b364,
+		0x3b370, 0x3b3b8,
+		0x3b3c0, 0x3b3e4,
+		0x3b3f8, 0x3b428,
+		0x3b430, 0x3b448,
+		0x3b460, 0x3b468,
+		0x3b470, 0x3b49c,
+		0x3b4f0, 0x3b528,
+		0x3b530, 0x3b548,
+		0x3b560, 0x3b568,
+		0x3b570, 0x3b59c,
+		0x3b5f0, 0x3b638,
+		0x3b640, 0x3b640,
+		0x3b648, 0x3b650,
+		0x3b65c, 0x3b664,
+		0x3b670, 0x3b6b8,
+		0x3b6c0, 0x3b6e4,
+		0x3b6f8, 0x3b738,
+		0x3b740, 0x3b740,
+		0x3b748, 0x3b750,
+		0x3b75c, 0x3b764,
+		0x3b770, 0x3b7b8,
+		0x3b7c0, 0x3b7e4,
 		0x3b7f8, 0x3b7fc,
 		0x3b814, 0x3b814,
 		0x3b82c, 0x3b82c,
 		0x3b880, 0x3b88c,
 		0x3b8e8, 0x3b8ec,
-		0x3b900, 0x3b948,
-		0x3b960, 0x3b99c,
-		0x3b9f0, 0x3bae4,
+		0x3b900, 0x3b928,
+		0x3b930, 0x3b948,
+		0x3b960, 0x3b968,
+		0x3b970, 0x3b99c,
+		0x3b9f0, 0x3ba38,
+		0x3ba40, 0x3ba40,
+		0x3ba48, 0x3ba50,
+		0x3ba5c, 0x3ba64,
+		0x3ba70, 0x3bab8,
+		0x3bac0, 0x3bae4,
 		0x3baf8, 0x3bb10,
 		0x3bb28, 0x3bb28,
 		0x3bb3c, 0x3bb50,
@@ -1233,21 +1677,32 @@
 		0x3bc3c, 0x3bc50,
 		0x3bcf0, 0x3bcfc,
 		0x3c000, 0x3c030,
+		0x3c038, 0x3c038,
+		0x3c040, 0x3c040,
 		0x3c100, 0x3c144,
-		0x3c190, 0x3c1d0,
+		0x3c190, 0x3c1a0,
+		0x3c1a8, 0x3c1b8,
+		0x3c1c4, 0x3c1c8,
+		0x3c1d0, 0x3c1d0,
 		0x3c200, 0x3c318,
-		0x3c400, 0x3c52c,
+		0x3c400, 0x3c4b4,
+		0x3c4c0, 0x3c52c,
 		0x3c540, 0x3c61c,
-		0x3c800, 0x3c834,
+		0x3c800, 0x3c828,
+		0x3c834, 0x3c834,
 		0x3c8c0, 0x3c908,
 		0x3c910, 0x3c9ac,
-		0x3ca00, 0x3ca2c,
+		0x3ca00, 0x3ca14,
+		0x3ca1c, 0x3ca2c,
 		0x3ca44, 0x3ca50,
-		0x3ca74, 0x3cc24,
+		0x3ca74, 0x3ca74,
+		0x3ca7c, 0x3cafc,
+		0x3cb08, 0x3cc24,
 		0x3cd00, 0x3cd00,
 		0x3cd08, 0x3cd14,
 		0x3cd1c, 0x3cd20,
-		0x3cd3c, 0x3cd50,
+		0x3cd3c, 0x3cd3c,
+		0x3cd48, 0x3cd50,
 		0x3d200, 0x3d20c,
 		0x3d220, 0x3d220,
 		0x3d240, 0x3d240,
@@ -1267,27 +1722,65 @@
 		0x3e2c8, 0x3e2fc,
 		0x3e600, 0x3e630,
 		0x3ea00, 0x3eabc,
-		0x3eb00, 0x3eb70,
-		0x3f000, 0x3f048,
-		0x3f060, 0x3f09c,
-		0x3f0f0, 0x3f148,
-		0x3f160, 0x3f19c,
-		0x3f1f0, 0x3f2e4,
-		0x3f2f8, 0x3f3e4,
-		0x3f3f8, 0x3f448,
-		0x3f460, 0x3f49c,
-		0x3f4f0, 0x3f548,
-		0x3f560, 0x3f59c,
-		0x3f5f0, 0x3f6e4,
-		0x3f6f8, 0x3f7e4,
+		0x3eb00, 0x3eb10,
+		0x3eb20, 0x3eb30,
+		0x3eb40, 0x3eb50,
+		0x3eb60, 0x3eb70,
+		0x3f000, 0x3f028,
+		0x3f030, 0x3f048,
+		0x3f060, 0x3f068,
+		0x3f070, 0x3f09c,
+		0x3f0f0, 0x3f128,
+		0x3f130, 0x3f148,
+		0x3f160, 0x3f168,
+		0x3f170, 0x3f19c,
+		0x3f1f0, 0x3f238,
+		0x3f240, 0x3f240,
+		0x3f248, 0x3f250,
+		0x3f25c, 0x3f264,
+		0x3f270, 0x3f2b8,
+		0x3f2c0, 0x3f2e4,
+		0x3f2f8, 0x3f338,
+		0x3f340, 0x3f340,
+		0x3f348, 0x3f350,
+		0x3f35c, 0x3f364,
+		0x3f370, 0x3f3b8,
+		0x3f3c0, 0x3f3e4,
+		0x3f3f8, 0x3f428,
+		0x3f430, 0x3f448,
+		0x3f460, 0x3f468,
+		0x3f470, 0x3f49c,
+		0x3f4f0, 0x3f528,
+		0x3f530, 0x3f548,
+		0x3f560, 0x3f568,
+		0x3f570, 0x3f59c,
+		0x3f5f0, 0x3f638,
+		0x3f640, 0x3f640,
+		0x3f648, 0x3f650,
+		0x3f65c, 0x3f664,
+		0x3f670, 0x3f6b8,
+		0x3f6c0, 0x3f6e4,
+		0x3f6f8, 0x3f738,
+		0x3f740, 0x3f740,
+		0x3f748, 0x3f750,
+		0x3f75c, 0x3f764,
+		0x3f770, 0x3f7b8,
+		0x3f7c0, 0x3f7e4,
 		0x3f7f8, 0x3f7fc,
 		0x3f814, 0x3f814,
 		0x3f82c, 0x3f82c,
 		0x3f880, 0x3f88c,
 		0x3f8e8, 0x3f8ec,
-		0x3f900, 0x3f948,
-		0x3f960, 0x3f99c,
-		0x3f9f0, 0x3fae4,
+		0x3f900, 0x3f928,
+		0x3f930, 0x3f948,
+		0x3f960, 0x3f968,
+		0x3f970, 0x3f99c,
+		0x3f9f0, 0x3fa38,
+		0x3fa40, 0x3fa40,
+		0x3fa48, 0x3fa50,
+		0x3fa5c, 0x3fa64,
+		0x3fa70, 0x3fab8,
+		0x3fac0, 0x3fae4,
 		0x3faf8, 0x3fb10,
 		0x3fb28, 0x3fb28,
 		0x3fb3c, 0x3fb50,
@@ -1296,108 +1789,224 @@
 		0x3fc3c, 0x3fc50,
 		0x3fcf0, 0x3fcfc,
 		0x40000, 0x4000c,
-		0x40040, 0x40068,
-		0x4007c, 0x40144,
+		0x40040, 0x40050,
+		0x40060, 0x40068,
+		0x4007c, 0x4008c,
+		0x40094, 0x400b0,
+		0x400c0, 0x40144,
 		0x40180, 0x4018c,
-		0x40200, 0x40298,
-		0x402ac, 0x4033c,
+		0x40200, 0x40254,
+		0x40260, 0x40264,
+		0x40270, 0x40288,
+		0x40290, 0x40298,
+		0x402ac, 0x402c8,
+		0x402d0, 0x402e0,
+		0x402f0, 0x402f0,
+		0x40300, 0x4033c,
 		0x403f8, 0x403fc,
 		0x41304, 0x413c4,
-		0x41400, 0x4141c,
+		0x41400, 0x4140c,
+		0x41414, 0x4141c,
 		0x41480, 0x414d0,
-		0x44000, 0x44078,
-		0x440c0, 0x44278,
-		0x442c0, 0x44478,
-		0x444c0, 0x44678,
-		0x446c0, 0x44878,
-		0x448c0, 0x449fc,
-		0x45000, 0x45068,
+		0x44000, 0x44054,
+		0x4405c, 0x44078,
+		0x440c0, 0x44174,
+		0x44180, 0x441ac,
+		0x441b4, 0x441b8,
+		0x441c0, 0x44254,
+		0x4425c, 0x44278,
+		0x442c0, 0x44374,
+		0x44380, 0x443ac,
+		0x443b4, 0x443b8,
+		0x443c0, 0x44454,
+		0x4445c, 0x44478,
+		0x444c0, 0x44574,
+		0x44580, 0x445ac,
+		0x445b4, 0x445b8,
+		0x445c0, 0x44654,
+		0x4465c, 0x44678,
+		0x446c0, 0x44774,
+		0x44780, 0x447ac,
+		0x447b4, 0x447b8,
+		0x447c0, 0x44854,
+		0x4485c, 0x44878,
+		0x448c0, 0x44974,
+		0x44980, 0x449ac,
+		0x449b4, 0x449b8,
+		0x449c0, 0x449fc,
+		0x45000, 0x45004,
+		0x45010, 0x45030,
+		0x45040, 0x45060,
+		0x45068, 0x45068,
 		0x45080, 0x45084,
 		0x450a0, 0x450b0,
-		0x45200, 0x45268,
+		0x45200, 0x45204,
+		0x45210, 0x45230,
+		0x45240, 0x45260,
+		0x45268, 0x45268,
 		0x45280, 0x45284,
 		0x452a0, 0x452b0,
 		0x460c0, 0x460e4,
-		0x47000, 0x4708c,
+		0x47000, 0x4703c,
+		0x47044, 0x4708c,
 		0x47200, 0x47250,
-		0x47400, 0x47420,
+		0x47400, 0x47408,
+		0x47414, 0x47420,
 		0x47600, 0x47618,
 		0x47800, 0x47814,
 		0x48000, 0x4800c,
-		0x48040, 0x48068,
-		0x4807c, 0x48144,
+		0x48040, 0x48050,
+		0x48060, 0x48068,
+		0x4807c, 0x4808c,
+		0x48094, 0x480b0,
+		0x480c0, 0x48144,
 		0x48180, 0x4818c,
-		0x48200, 0x48298,
-		0x482ac, 0x4833c,
+		0x48200, 0x48254,
+		0x48260, 0x48264,
+		0x48270, 0x48288,
+		0x48290, 0x48298,
+		0x482ac, 0x482c8,
+		0x482d0, 0x482e0,
+		0x482f0, 0x482f0,
+		0x48300, 0x4833c,
 		0x483f8, 0x483fc,
 		0x49304, 0x493c4,
-		0x49400, 0x4941c,
+		0x49400, 0x4940c,
+		0x49414, 0x4941c,
 		0x49480, 0x494d0,
-		0x4c000, 0x4c078,
-		0x4c0c0, 0x4c278,
-		0x4c2c0, 0x4c478,
-		0x4c4c0, 0x4c678,
-		0x4c6c0, 0x4c878,
-		0x4c8c0, 0x4c9fc,
-		0x4d000, 0x4d068,
+		0x4c000, 0x4c054,
+		0x4c05c, 0x4c078,
+		0x4c0c0, 0x4c174,
+		0x4c180, 0x4c1ac,
+		0x4c1b4, 0x4c1b8,
+		0x4c1c0, 0x4c254,
+		0x4c25c, 0x4c278,
+		0x4c2c0, 0x4c374,
+		0x4c380, 0x4c3ac,
+		0x4c3b4, 0x4c3b8,
+		0x4c3c0, 0x4c454,
+		0x4c45c, 0x4c478,
+		0x4c4c0, 0x4c574,
+		0x4c580, 0x4c5ac,
+		0x4c5b4, 0x4c5b8,
+		0x4c5c0, 0x4c654,
+		0x4c65c, 0x4c678,
+		0x4c6c0, 0x4c774,
+		0x4c780, 0x4c7ac,
+		0x4c7b4, 0x4c7b8,
+		0x4c7c0, 0x4c854,
+		0x4c85c, 0x4c878,
+		0x4c8c0, 0x4c974,
+		0x4c980, 0x4c9ac,
+		0x4c9b4, 0x4c9b8,
+		0x4c9c0, 0x4c9fc,
+		0x4d000, 0x4d004,
+		0x4d010, 0x4d030,
+		0x4d040, 0x4d060,
+		0x4d068, 0x4d068,
 		0x4d080, 0x4d084,
 		0x4d0a0, 0x4d0b0,
-		0x4d200, 0x4d268,
+		0x4d200, 0x4d204,
+		0x4d210, 0x4d230,
+		0x4d240, 0x4d260,
+		0x4d268, 0x4d268,
 		0x4d280, 0x4d284,
 		0x4d2a0, 0x4d2b0,
 		0x4e0c0, 0x4e0e4,
-		0x4f000, 0x4f08c,
+		0x4f000, 0x4f03c,
+		0x4f044, 0x4f08c,
 		0x4f200, 0x4f250,
-		0x4f400, 0x4f420,
+		0x4f400, 0x4f408,
+		0x4f414, 0x4f420,
 		0x4f600, 0x4f618,
 		0x4f800, 0x4f814,
-		0x50000, 0x500cc,
+		0x50000, 0x50084,
+		0x50090, 0x500cc,
 		0x50400, 0x50400,
-		0x50800, 0x508cc,
+		0x50800, 0x50884,
+		0x50890, 0x508cc,
 		0x50c00, 0x50c00,
 		0x51000, 0x5101c,
 		0x51300, 0x51308,
 	};
 
 	static const unsigned int t6_reg_ranges[] = {
-		0x1008, 0x1124,
-		0x1138, 0x114c,
-		0x1180, 0x11b4,
+		0x1008, 0x101c,
+		0x1024, 0x10a8,
+		0x10b4, 0x10f8,
+		0x1100, 0x1114,
+		0x111c, 0x112c,
+		0x1138, 0x113c,
+		0x1144, 0x114c,
+		0x1180, 0x1184,
+		0x1190, 0x1194,
+		0x11a0, 0x11a4,
+		0x11b0, 0x11b4,
 		0x11fc, 0x1254,
 		0x1280, 0x133c,
 		0x1800, 0x18fc,
 		0x3000, 0x302c,
-		0x3060, 0x30d8,
+		0x3060, 0x30b0,
+		0x30b8, 0x30d8,
 		0x30e0, 0x30fc,
 		0x3140, 0x357c,
 		0x35a8, 0x35cc,
 		0x35ec, 0x35ec,
 		0x3600, 0x5624,
-		0x56cc, 0x575c,
+		0x56cc, 0x56ec,
+		0x56f4, 0x5720,
+		0x5728, 0x575c,
 		0x580c, 0x5814,
-		0x5890, 0x58bc,
+		0x5890, 0x589c,
+		0x58a4, 0x58ac,
+		0x58b8, 0x58bc,
 		0x5940, 0x595c,
 		0x5980, 0x598c,
-		0x59b0, 0x59dc,
+		0x59b0, 0x59c8,
+		0x59d0, 0x59dc,
 		0x59fc, 0x5a18,
 		0x5a60, 0x5a6c,
-		0x5a80, 0x5a9c,
+		0x5a80, 0x5a8c,
+		0x5a94, 0x5a9c,
 		0x5b94, 0x5bfc,
-		0x5c10, 0x5ec0,
+		0x5c10, 0x5e48,
+		0x5e50, 0x5e94,
+		0x5ea0, 0x5eb0,
+		0x5ec0, 0x5ec0,
 		0x5ec8, 0x5ecc,
-		0x6000, 0x6040,
-		0x6058, 0x619c,
+		0x6000, 0x6020,
+		0x6028, 0x6040,
+		0x6058, 0x609c,
+		0x60a8, 0x619c,
 		0x7700, 0x7798,
 		0x77c0, 0x7880,
 		0x78cc, 0x78fc,
-		0x7b00, 0x7c54,
-		0x7d00, 0x7efc,
+		0x7b00, 0x7b58,
+		0x7b60, 0x7b84,
+		0x7b8c, 0x7c54,
+		0x7d00, 0x7d38,
+		0x7d40, 0x7d84,
+		0x7d8c, 0x7ddc,
+		0x7de4, 0x7e04,
+		0x7e10, 0x7e1c,
+		0x7e24, 0x7e38,
+		0x7e40, 0x7e44,
+		0x7e4c, 0x7e78,
+		0x7e80, 0x7edc,
+		0x7ee8, 0x7efc,
 		0x8dc0, 0x8de4,
-		0x8df8, 0x8e84,
+		0x8df8, 0x8e04,
+		0x8e10, 0x8e84,
 		0x8ea0, 0x8f88,
-		0x8fb8, 0x9124,
+		0x8fb8, 0x9058,
+		0x9060, 0x9060,
+		0x9068, 0x90f8,
+		0x9100, 0x9124,
 		0x9400, 0x9470,
-		0x9600, 0x971c,
+		0x9600, 0x9600,
+		0x9608, 0x9638,
+		0x9640, 0x9704,
+		0x9710, 0x971c,
 		0x9800, 0x9808,
 		0x9820, 0x983c,
 		0x9850, 0x9864,
@@ -1411,109 +2020,170 @@
 		0x9f80, 0xa020,
 		0xd004, 0xd03c,
 		0xd100, 0xd118,
-		0xd200, 0xd31c,
+		0xd200, 0xd214,
+		0xd220, 0xd234,
+		0xd240, 0xd254,
+		0xd260, 0xd274,
+		0xd280, 0xd294,
+		0xd2a0, 0xd2b4,
+		0xd2c0, 0xd2d4,
+		0xd2e0, 0xd2f4,
+		0xd300, 0xd31c,
 		0xdfc0, 0xdfe0,
 		0xe000, 0xf008,
 		0x11000, 0x11014,
-		0x11048, 0x1117c,
-		0x11190, 0x11270,
+		0x11048, 0x1106c,
+		0x11074, 0x11088,
+		0x11098, 0x11120,
+		0x1112c, 0x1117c,
+		0x11190, 0x112e0,
 		0x11300, 0x1130c,
 		0x12000, 0x1206c,
 		0x19040, 0x1906c,
 		0x19078, 0x19080,
-		0x1908c, 0x19124,
-		0x19150, 0x191b0,
+		0x1908c, 0x190e8,
+		0x190f0, 0x190f8,
+		0x19100, 0x19110,
+		0x19120, 0x19124,
+		0x19150, 0x19194,
+		0x1919c, 0x191b0,
 		0x191d0, 0x191e8,
-		0x19238, 0x192bc,
-		0x193f8, 0x19474,
+		0x19238, 0x192b0,
+		0x192bc, 0x192bc,
+		0x19348, 0x1934c,
+		0x193f8, 0x19418,
+		0x19420, 0x19428,
+		0x19430, 0x19444,
+		0x1944c, 0x1946c,
+		0x19474, 0x19474,
 		0x19490, 0x194cc,
 		0x194f0, 0x194f8,
-		0x19c00, 0x19c80,
-		0x19c94, 0x19cbc,
-		0x19ce4, 0x19d28,
+		0x19c00, 0x19c48,
+		0x19c50, 0x19c80,
+		0x19c94, 0x19c98,
+		0x19ca0, 0x19cbc,
+		0x19ce4, 0x19ce4,
+		0x19cf0, 0x19cf8,
+		0x19d00, 0x19d28,
 		0x19d50, 0x19d78,
-		0x19d94, 0x19dc8,
+		0x19d94, 0x19d98,
+		0x19da0, 0x19dc8,
 		0x19df0, 0x19e10,
 		0x19e50, 0x19e6c,
-		0x19ea0, 0x19f34,
+		0x19ea0, 0x19ebc,
+		0x19ec4, 0x19ef4,
+		0x19f04, 0x19f2c,
+		0x19f34, 0x19f34,
 		0x19f40, 0x19f50,
 		0x19f90, 0x19fac,
-		0x19fc4, 0x19fe4,
-		0x1a000, 0x1a06c,
-		0x1a0b0, 0x1a120,
-		0x1a128, 0x1a138,
+		0x19fc4, 0x19fc8,
+		0x19fd0, 0x19fe4,
+		0x1a000, 0x1a004,
+		0x1a010, 0x1a06c,
+		0x1a0b0, 0x1a0e4,
+		0x1a0ec, 0x1a0f8,
+		0x1a100, 0x1a108,
+		0x1a114, 0x1a120,
+		0x1a128, 0x1a130,
+		0x1a138, 0x1a138,
 		0x1a190, 0x1a1c4,
 		0x1a1fc, 0x1a1fc,
 		0x1e008, 0x1e00c,
-		0x1e040, 0x1e04c,
+		0x1e040, 0x1e044,
+		0x1e04c, 0x1e04c,
 		0x1e284, 0x1e290,
 		0x1e2c0, 0x1e2c0,
 		0x1e2e0, 0x1e2e0,
 		0x1e300, 0x1e384,
 		0x1e3c0, 0x1e3c8,
 		0x1e408, 0x1e40c,
-		0x1e440, 0x1e44c,
+		0x1e440, 0x1e444,
+		0x1e44c, 0x1e44c,
 		0x1e684, 0x1e690,
 		0x1e6c0, 0x1e6c0,
 		0x1e6e0, 0x1e6e0,
 		0x1e700, 0x1e784,
 		0x1e7c0, 0x1e7c8,
 		0x1e808, 0x1e80c,
-		0x1e840, 0x1e84c,
+		0x1e840, 0x1e844,
+		0x1e84c, 0x1e84c,
 		0x1ea84, 0x1ea90,
 		0x1eac0, 0x1eac0,
 		0x1eae0, 0x1eae0,
 		0x1eb00, 0x1eb84,
 		0x1ebc0, 0x1ebc8,
 		0x1ec08, 0x1ec0c,
-		0x1ec40, 0x1ec4c,
+		0x1ec40, 0x1ec44,
+		0x1ec4c, 0x1ec4c,
 		0x1ee84, 0x1ee90,
 		0x1eec0, 0x1eec0,
 		0x1eee0, 0x1eee0,
 		0x1ef00, 0x1ef84,
 		0x1efc0, 0x1efc8,
 		0x1f008, 0x1f00c,
-		0x1f040, 0x1f04c,
+		0x1f040, 0x1f044,
+		0x1f04c, 0x1f04c,
 		0x1f284, 0x1f290,
 		0x1f2c0, 0x1f2c0,
 		0x1f2e0, 0x1f2e0,
 		0x1f300, 0x1f384,
 		0x1f3c0, 0x1f3c8,
 		0x1f408, 0x1f40c,
-		0x1f440, 0x1f44c,
+		0x1f440, 0x1f444,
+		0x1f44c, 0x1f44c,
 		0x1f684, 0x1f690,
 		0x1f6c0, 0x1f6c0,
 		0x1f6e0, 0x1f6e0,
 		0x1f700, 0x1f784,
 		0x1f7c0, 0x1f7c8,
 		0x1f808, 0x1f80c,
-		0x1f840, 0x1f84c,
+		0x1f840, 0x1f844,
+		0x1f84c, 0x1f84c,
 		0x1fa84, 0x1fa90,
 		0x1fac0, 0x1fac0,
 		0x1fae0, 0x1fae0,
 		0x1fb00, 0x1fb84,
 		0x1fbc0, 0x1fbc8,
 		0x1fc08, 0x1fc0c,
-		0x1fc40, 0x1fc4c,
+		0x1fc40, 0x1fc44,
+		0x1fc4c, 0x1fc4c,
 		0x1fe84, 0x1fe90,
 		0x1fec0, 0x1fec0,
 		0x1fee0, 0x1fee0,
 		0x1ff00, 0x1ff84,
 		0x1ffc0, 0x1ffc8,
-		0x30000, 0x30070,
-		0x30100, 0x301d0,
+		0x30000, 0x30030,
+		0x30038, 0x30038,
+		0x30040, 0x30040,
+		0x30048, 0x30048,
+		0x30050, 0x30050,
+		0x3005c, 0x30060,
+		0x30068, 0x30068,
+		0x30070, 0x30070,
+		0x30100, 0x30168,
+		0x30190, 0x301a0,
+		0x301a8, 0x301b8,
+		0x301c4, 0x301c8,
+		0x301d0, 0x301d0,
 		0x30200, 0x30320,
-		0x30400, 0x3052c,
+		0x30400, 0x304b4,
+		0x304c0, 0x3052c,
 		0x30540, 0x3061c,
-		0x30800, 0x30890,
+		0x30800, 0x308a0,
 		0x308c0, 0x30908,
 		0x30910, 0x309b8,
 		0x30a00, 0x30a04,
-		0x30a0c, 0x30a2c,
+		0x30a0c, 0x30a14,
+		0x30a1c, 0x30a2c,
 		0x30a44, 0x30a50,
-		0x30a74, 0x30c24,
-		0x30d00, 0x30d3c,
-		0x30d44, 0x30d7c,
+		0x30a74, 0x30a74,
+		0x30a7c, 0x30afc,
+		0x30b08, 0x30c24,
+		0x30d00, 0x30d14,
+		0x30d1c, 0x30d3c,
+		0x30d44, 0x30d4c,
+		0x30d54, 0x30d74,
+		0x30d7c, 0x30d7c,
 		0x30de0, 0x30de0,
 		0x30e00, 0x30ed4,
 		0x30f00, 0x30fa4,
@@ -1542,7 +2212,8 @@
 		0x31bb0, 0x31bb4,
 		0x31bc8, 0x31bd4,
 		0x32140, 0x3218c,
-		0x321f0, 0x32200,
+		0x321f0, 0x321f4,
+		0x32200, 0x32200,
 		0x32218, 0x32218,
 		0x32400, 0x32400,
 		0x32408, 0x3241c,
@@ -1551,46 +2222,108 @@
 		0x326a8, 0x326a8,
 		0x326ec, 0x326ec,
 		0x32a00, 0x32abc,
-		0x32b00, 0x32b78,
+		0x32b00, 0x32b38,
+		0x32b40, 0x32b58,
+		0x32b60, 0x32b78,
 		0x32c00, 0x32c00,
 		0x32c08, 0x32c3c,
 		0x32e00, 0x32e2c,
 		0x32f00, 0x32f2c,
-		0x33000, 0x330ac,
-		0x330c0, 0x331ac,
-		0x331c0, 0x332c4,
-		0x332e4, 0x333c4,
-		0x333e4, 0x334ac,
-		0x334c0, 0x335ac,
-		0x335c0, 0x336c4,
-		0x336e4, 0x337c4,
+		0x33000, 0x3302c,
+		0x33034, 0x33050,
+		0x33058, 0x33058,
+		0x33060, 0x3308c,
+		0x3309c, 0x330ac,
+		0x330c0, 0x330c0,
+		0x330c8, 0x330d0,
+		0x330d8, 0x330e0,
+		0x330ec, 0x3312c,
+		0x33134, 0x33150,
+		0x33158, 0x33158,
+		0x33160, 0x3318c,
+		0x3319c, 0x331ac,
+		0x331c0, 0x331c0,
+		0x331c8, 0x331d0,
+		0x331d8, 0x331e0,
+		0x331ec, 0x33290,
+		0x33298, 0x332c4,
+		0x332e4, 0x33390,
+		0x33398, 0x333c4,
+		0x333e4, 0x3342c,
+		0x33434, 0x33450,
+		0x33458, 0x33458,
+		0x33460, 0x3348c,
+		0x3349c, 0x334ac,
+		0x334c0, 0x334c0,
+		0x334c8, 0x334d0,
+		0x334d8, 0x334e0,
+		0x334ec, 0x3352c,
+		0x33534, 0x33550,
+		0x33558, 0x33558,
+		0x33560, 0x3358c,
+		0x3359c, 0x335ac,
+		0x335c0, 0x335c0,
+		0x335c8, 0x335d0,
+		0x335d8, 0x335e0,
+		0x335ec, 0x33690,
+		0x33698, 0x336c4,
+		0x336e4, 0x33790,
+		0x33798, 0x337c4,
 		0x337e4, 0x337fc,
 		0x33814, 0x33814,
 		0x33854, 0x33868,
 		0x33880, 0x3388c,
 		0x338c0, 0x338d0,
 		0x338e8, 0x338ec,
-		0x33900, 0x339ac,
-		0x339c0, 0x33ac4,
+		0x33900, 0x3392c,
+		0x33934, 0x33950,
+		0x33958, 0x33958,
+		0x33960, 0x3398c,
+		0x3399c, 0x339ac,
+		0x339c0, 0x339c0,
+		0x339c8, 0x339d0,
+		0x339d8, 0x339e0,
+		0x339ec, 0x33a90,
+		0x33a98, 0x33ac4,
 		0x33ae4, 0x33b10,
-		0x33b24, 0x33b50,
+		0x33b24, 0x33b28,
+		0x33b38, 0x33b50,
 		0x33bf0, 0x33c10,
-		0x33c24, 0x33c50,
+		0x33c24, 0x33c28,
+		0x33c38, 0x33c50,
 		0x33cf0, 0x33cfc,
-		0x34000, 0x34070,
-		0x34100, 0x341d0,
+		0x34000, 0x34030,
+		0x34038, 0x34038,
+		0x34040, 0x34040,
+		0x34048, 0x34048,
+		0x34050, 0x34050,
+		0x3405c, 0x34060,
+		0x34068, 0x34068,
+		0x34070, 0x34070,
+		0x34100, 0x34168,
+		0x34190, 0x341a0,
+		0x341a8, 0x341b8,
+		0x341c4, 0x341c8,
+		0x341d0, 0x341d0,
 		0x34200, 0x34320,
-		0x34400, 0x3452c,
+		0x34400, 0x344b4,
+		0x344c0, 0x3452c,
 		0x34540, 0x3461c,
-		0x34800, 0x34890,
+		0x34800, 0x348a0,
 		0x348c0, 0x34908,
 		0x34910, 0x349b8,
 		0x34a00, 0x34a04,
-		0x34a0c, 0x34a2c,
+		0x34a0c, 0x34a14,
+		0x34a1c, 0x34a2c,
 		0x34a44, 0x34a50,
-		0x34a74, 0x34c24,
-		0x34d00, 0x34d3c,
-		0x34d44, 0x34d7c,
+		0x34a74, 0x34a74,
+		0x34a7c, 0x34afc,
+		0x34b08, 0x34c24,
+		0x34d00, 0x34d14,
+		0x34d1c, 0x34d3c,
+		0x34d44, 0x34d4c,
+		0x34d54, 0x34d74,
+		0x34d7c, 0x34d7c,
 		0x34de0, 0x34de0,
 		0x34e00, 0x34ed4,
 		0x34f00, 0x34fa4,
@@ -1619,7 +2352,8 @@
 		0x35bb0, 0x35bb4,
 		0x35bc8, 0x35bd4,
 		0x36140, 0x3618c,
-		0x361f0, 0x36200,
+		0x361f0, 0x361f4,
+		0x36200, 0x36200,
 		0x36218, 0x36218,
 		0x36400, 0x36400,
 		0x36408, 0x3641c,
@@ -1628,31 +2362,75 @@
 		0x366a8, 0x366a8,
 		0x366ec, 0x366ec,
 		0x36a00, 0x36abc,
-		0x36b00, 0x36b78,
+		0x36b00, 0x36b38,
+		0x36b40, 0x36b58,
+		0x36b60, 0x36b78,
 		0x36c00, 0x36c00,
 		0x36c08, 0x36c3c,
 		0x36e00, 0x36e2c,
 		0x36f00, 0x36f2c,
-		0x37000, 0x370ac,
-		0x370c0, 0x371ac,
-		0x371c0, 0x372c4,
-		0x372e4, 0x373c4,
-		0x373e4, 0x374ac,
-		0x374c0, 0x375ac,
-		0x375c0, 0x376c4,
-		0x376e4, 0x377c4,
+		0x37000, 0x3702c,
+		0x37034, 0x37050,
+		0x37058, 0x37058,
+		0x37060, 0x3708c,
+		0x3709c, 0x370ac,
+		0x370c0, 0x370c0,
+		0x370c8, 0x370d0,
+		0x370d8, 0x370e0,
+		0x370ec, 0x3712c,
+		0x37134, 0x37150,
+		0x37158, 0x37158,
+		0x37160, 0x3718c,
+		0x3719c, 0x371ac,
+		0x371c0, 0x371c0,
+		0x371c8, 0x371d0,
+		0x371d8, 0x371e0,
+		0x371ec, 0x37290,
+		0x37298, 0x372c4,
+		0x372e4, 0x37390,
+		0x37398, 0x373c4,
+		0x373e4, 0x3742c,
+		0x37434, 0x37450,
+		0x37458, 0x37458,
+		0x37460, 0x3748c,
+		0x3749c, 0x374ac,
+		0x374c0, 0x374c0,
+		0x374c8, 0x374d0,
+		0x374d8, 0x374e0,
+		0x374ec, 0x3752c,
+		0x37534, 0x37550,
+		0x37558, 0x37558,
+		0x37560, 0x3758c,
+		0x3759c, 0x375ac,
+		0x375c0, 0x375c0,
+		0x375c8, 0x375d0,
+		0x375d8, 0x375e0,
+		0x375ec, 0x37690,
+		0x37698, 0x376c4,
+		0x376e4, 0x37790,
+		0x37798, 0x377c4,
 		0x377e4, 0x377fc,
 		0x37814, 0x37814,
 		0x37854, 0x37868,
 		0x37880, 0x3788c,
 		0x378c0, 0x378d0,
 		0x378e8, 0x378ec,
-		0x37900, 0x379ac,
-		0x379c0, 0x37ac4,
+		0x37900, 0x3792c,
+		0x37934, 0x37950,
+		0x37958, 0x37958,
+		0x37960, 0x3798c,
+		0x3799c, 0x379ac,
+		0x379c0, 0x379c0,
+		0x379c8, 0x379d0,
+		0x379d8, 0x379e0,
+		0x379ec, 0x37a90,
+		0x37a98, 0x37ac4,
 		0x37ae4, 0x37b10,
-		0x37b24, 0x37b50,
+		0x37b24, 0x37b28,
+		0x37b38, 0x37b50,
 		0x37bf0, 0x37c10,
-		0x37c24, 0x37c50,
+		0x37c24, 0x37c28,
+		0x37c38, 0x37c50,
 		0x37cf0, 0x37cfc,
 		0x40040, 0x40040,
 		0x40080, 0x40084,
@@ -1664,36 +2442,62 @@
 		0x40280, 0x40280,
 		0x40304, 0x40304,
 		0x40330, 0x4033c,
-		0x41304, 0x413dc,
-		0x41400, 0x4141c,
+		0x41304, 0x413c8,
+		0x413d0, 0x413dc,
+		0x413f0, 0x413f0,
+		0x41400, 0x4140c,
+		0x41414, 0x4141c,
 		0x41480, 0x414d0,
 		0x44000, 0x4407c,
-		0x440c0, 0x4427c,
-		0x442c0, 0x4447c,
-		0x444c0, 0x4467c,
-		0x446c0, 0x4487c,
-		0x448c0, 0x44a7c,
-		0x44ac0, 0x44c7c,
-		0x44cc0, 0x44e7c,
-		0x44ec0, 0x4507c,
-		0x450c0, 0x451fc,
-		0x45800, 0x45868,
+		0x440c0, 0x441ac,
+		0x441b4, 0x4427c,
+		0x442c0, 0x443ac,
+		0x443b4, 0x4447c,
+		0x444c0, 0x445ac,
+		0x445b4, 0x4467c,
+		0x446c0, 0x447ac,
+		0x447b4, 0x4487c,
+		0x448c0, 0x449ac,
+		0x449b4, 0x44a7c,
+		0x44ac0, 0x44bac,
+		0x44bb4, 0x44c7c,
+		0x44cc0, 0x44dac,
+		0x44db4, 0x44e7c,
+		0x44ec0, 0x44fac,
+		0x44fb4, 0x4507c,
+		0x450c0, 0x451ac,
+		0x451b4, 0x451fc,
+		0x45800, 0x45804,
+		0x45810, 0x45830,
+		0x45840, 0x45860,
+		0x45868, 0x45868,
 		0x45880, 0x45884,
 		0x458a0, 0x458b0,
-		0x45a00, 0x45a68,
+		0x45a00, 0x45a04,
+		0x45a10, 0x45a30,
+		0x45a40, 0x45a60,
+		0x45a68, 0x45a68,
 		0x45a80, 0x45a84,
 		0x45aa0, 0x45ab0,
 		0x460c0, 0x460e4,
-		0x47000, 0x4708c,
+		0x47000, 0x4703c,
+		0x47044, 0x4708c,
 		0x47200, 0x47250,
-		0x47400, 0x47420,
+		0x47400, 0x47408,
+		0x47414, 0x47420,
 		0x47600, 0x47618,
-		0x47800, 0x4782c,
-		0x50000, 0x500cc,
+		0x47800, 0x47814,
+		0x47820, 0x4782c,
+		0x50000, 0x50084,
+		0x50090, 0x500cc,
+		0x50300, 0x50384,
 		0x50400, 0x50400,
-		0x50800, 0x508cc,
+		0x50800, 0x50884,
+		0x50890, 0x508cc,
+		0x50b00, 0x50b84,
 		0x50c00, 0x50c00,
-		0x51000, 0x510b0,
+		0x51000, 0x51020,
+		0x51028, 0x510b0,
 		0x51300, 0x51324,
 	};
 
@@ -2177,11 +2981,15 @@
  */
 int t4_check_fw_version(struct adapter *adap)
 {
-	int ret, major, minor, micro;
+	int i, ret, major, minor, micro;
 	int exp_major, exp_minor, exp_micro;
 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
 
 	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+	/* Retry a few times if the firmware is busy before returning an error */
+	for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
+		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+
 	if (ret)
 		return ret;
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 640369d..13708fd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -263,4 +263,9 @@
 #undef FLASH_START
 #undef FLASH_MAX_SIZE
 
+#define SGE_TIMESTAMP_S 0
+#define SGE_TIMESTAMP_M 0xfffffffffffffffULL
+#define SGE_TIMESTAMP_V(x) ((__u64)(x) << SGE_TIMESTAMP_S)
+#define SGE_TIMESTAMP_G(x) (((__u64)(x) >> SGE_TIMESTAMP_S) & SGE_TIMESTAMP_M)
+
 #endif /* __T4_HW_H */
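
The SGE_TIMESTAMP_G() value extracted from last_flit above is a tick count of
the adapter core clock; cxgb4_sgetim_to_hwtstamp() converts it to nanoseconds
with a round-to-nearest division by the VPD core clock. A self-contained sketch
of that arithmetic (assuming, as the driver appears to, a core clock reported
in kHz) is:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the conversion done by cxgb4_sgetim_to_hwtstamp(): turn the
 * 60-bit SGE tick count into nanoseconds.  Assumption: cclk_khz is the
 * adapter core clock in kHz.
 */
static uint64_t sgetstamp_to_ns(uint64_t sgetstamp, uint32_t cclk_khz)
{
	/* ticks / (cclk_khz * 1000 Hz) seconds == ticks * 10^6 / cclk_khz ns;
	 * adding cclk_khz / 2 before dividing rounds to the nearest ns.
	 */
	return (sgetstamp * 1000 * 1000 + cclk_khz / 2) / cclk_khz;
}

int main(void)
{
	/* 300 ticks at a hypothetical 200 MHz (200000 kHz) core clock */
	printf("%llu ns\n",
	       (unsigned long long)sgetstamp_to_ns(300, 200000));
	return 0;
}

With that hypothetical 200 MHz clock, 300 ticks come out as 1500 ns, matching
ticks divided by the clock frequency and scaled to nanoseconds.
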
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 8353a6c..03ed00c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -157,6 +157,11 @@
 	CH_PCI_ID_TABLE_FENTRY(0x5090),	/* Custom T540-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x5091),	/* Custom T522-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x5092),	/* Custom T520-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5093),	/* Custom T580-LP-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5094),	/* Custom T540-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5095),	/* Custom T540-CR-SO */
+	CH_PCI_ID_TABLE_FENTRY(0x5096),	/* Custom T580-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5097),	/* Custom T520-KR */
 
 	/* T6 adapters:
 	 */
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 8b53f7d..6401ba99 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -143,6 +143,7 @@
 	struct vnic_dev *vdev;
 	struct timer_list notify_timer;
 	struct work_struct reset;
+	struct work_struct tx_hang_reset;
 	struct work_struct change_mtu_work;
 	struct msix_entry msix_entry[ENIC_INTR_MAX];
 	struct enic_msix_entry msix[ENIC_INTR_MAX];
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 3352d02..0c22fd0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -178,13 +178,15 @@
 	return 0;
 }
 
-static void enic_log_q_error(struct enic *enic)
+static bool enic_log_q_error(struct enic *enic)
 {
 	unsigned int i;
 	u32 error_status;
+	bool err = false;
 
 	for (i = 0; i < enic->wq_count; i++) {
 		error_status = vnic_wq_error_status(&enic->wq[i]);
+		err |= error_status;
 		if (error_status)
 			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
 				i, error_status);
@@ -192,10 +194,13 @@
 
 	for (i = 0; i < enic->rq_count; i++) {
 		error_status = vnic_rq_error_status(&enic->rq[i]);
+		err |= error_status;
 		if (error_status)
 			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
 				i, error_status);
 	}
+
+	return err;
 }
 
 static void enic_msglvl_check(struct enic *enic)
@@ -333,10 +338,9 @@
 
 	vnic_intr_return_all_credits(&enic->intr[intr]);
 
-	enic_log_q_error(enic);
-
-	/* schedule recovery from WQ/RQ error */
-	schedule_work(&enic->reset);
+	if (enic_log_q_error(enic))
+		/* schedule recovery from WQ/RQ error */
+		schedule_work(&enic->reset);
 
 	return IRQ_HANDLED;
 }
@@ -804,7 +808,7 @@
 static void enic_tx_timeout(struct net_device *netdev)
 {
 	struct enic *enic = netdev_priv(netdev);
-	schedule_work(&enic->reset);
+	schedule_work(&enic->tx_hang_reset);
 }
 
 static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
@@ -1924,6 +1928,19 @@
 	return err;
 }
 
+static int enic_dev_soft_reset(struct enic *enic)
+{
+	int err;
+
+	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
+			    vnic_dev_soft_reset_done, 0);
+	if (err)
+		netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",
+			   err);
+
+	return err;
+}
+
 static int enic_dev_hang_reset(struct enic *enic)
 {
 	int err;
@@ -2060,6 +2077,26 @@
 	rtnl_lock();
 
 	spin_lock(&enic->enic_api_lock);
+	enic_stop(enic->netdev);
+	enic_dev_soft_reset(enic);
+	enic_reset_addr_lists(enic);
+	enic_init_vnic_resources(enic);
+	enic_set_rss_nic_cfg(enic);
+	enic_dev_set_ig_vlan_rewrite_mode(enic);
+	enic_open(enic->netdev);
+	spin_unlock(&enic->enic_api_lock);
+	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
+
+	rtnl_unlock();
+}
+
+static void enic_tx_hang_reset(struct work_struct *work)
+{
+	struct enic *enic = container_of(work, struct enic, tx_hang_reset);
+
+	rtnl_lock();
+
+	spin_lock(&enic->enic_api_lock);
 	enic_dev_hang_notify(enic);
 	enic_stop(enic->netdev);
 	enic_dev_hang_reset(enic);
@@ -2583,6 +2620,7 @@
 
 	enic_set_rx_coal_setting(enic);
 	INIT_WORK(&enic->reset, enic_reset);
+	INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
 	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
 
 	for (i = 0; i < enic->wq_count; i++)
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index a3badef..1ffd105 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -659,14 +659,14 @@
 	return 0;
 }
 
-static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
 {
 	u64 a0 = (u32)arg, a1 = 0;
 	int wait = 1000;
 	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
 }
 
-static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
 {
 	u64 a0 = 0, a1 = 0;
 	int wait = 1000;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index b013b6a..54156c4 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -155,7 +155,9 @@
 void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev);
 int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev);
 int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
 int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
 void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
 	enum vnic_dev_intr_mode intr_mode);
 enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 0a27805..8215409 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -582,6 +582,7 @@
 	u16 pvid;
 	__be16 vxlan_port;
 	int vxlan_port_count;
+	int vxlan_port_aliases;
 	struct phy_info phy;
 	u8 wol_cap;
 	bool wol_en;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 12687bf..7bf51a1 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5176,6 +5176,11 @@
 	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
 		return;
 
+	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
+		adapter->vxlan_port_aliases++;
+		return;
+	}
+
 	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
 		dev_info(dev,
 			 "Only one UDP port supported for VxLAN offloads\n");
@@ -5226,6 +5231,11 @@
 	if (adapter->vxlan_port != port)
 		goto done;
 
+	if (adapter->vxlan_port_aliases) {
+		adapter->vxlan_port_aliases--;
+		return;
+	}
+
 	be_disable_vxlan_offloads(adapter);
 
 	dev_info(&adapter->pdev->dev,
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index a2c96fd..ff66549 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -201,6 +201,7 @@
 	void __iomem *membase;
 	int dma_alloc;
 	resource_size_t io_region_size;
+	bool big_endian;
 
 	unsigned int num_bd;
 	unsigned int num_tx;
@@ -236,12 +237,18 @@
 
 static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
 {
-	return ioread32(dev->iobase + offset);
+	if (dev->big_endian)
+		return ioread32be(dev->iobase + offset);
+	else
+		return ioread32(dev->iobase + offset);
 }
 
 static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
 {
-	iowrite32(data, dev->iobase + offset);
+	if (dev->big_endian)
+		iowrite32be(data, dev->iobase + offset);
+	else
+		iowrite32(data, dev->iobase + offset);
 }
 
 static inline void ethoc_read_bd(struct ethoc *dev, int index,
@@ -1106,6 +1113,9 @@
 		priv->dma_alloc = buffer_size;
 	}
 
+	priv->big_endian = pdata ? pdata->big_endian :
+		of_device_is_big_endian(pdev->dev.of_node);
+
 	/* calculate the number of TX/RX buffers, maximum 128 supported */
 	num_bd = min_t(unsigned int,
 		128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index dd4ca39..501e143 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3070,7 +3070,6 @@
 }
 #endif
 
-#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
 static inline void fec_enet_set_netdev_features(struct net_device *netdev,
 	netdev_features_t features)
 {
@@ -3094,7 +3093,7 @@
 	struct fec_enet_private *fep = netdev_priv(netdev);
 	netdev_features_t changed = features ^ netdev->features;
 
-	if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
+	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
 		napi_disable(&fep->napi);
 		netif_tx_lock_bh(netdev);
 		fec_stop(netdev);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 1543cf0..f9e7446 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -112,9 +112,8 @@
 	unsigned long flags;
 	u32 val, tempval;
 	int inc;
-	struct timespec ts;
+	struct timespec64 ts;
 	u64 ns;
-	u32 remainder;
 	val = 0;
 
 	if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
@@ -163,8 +162,7 @@
 		tempval = readl(fep->hwp + FEC_ATIME);
 		/* Convert the ptp local counter to 1588 timestamp */
 		ns = timecounter_cyc2time(&fep->tc, tempval);
-		ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
-		ts.tv_nsec = remainder;
+		ts = ns_to_timespec64(ns);
 
 		/* The tempval is  less than 3 seconds, and  so val is less than
 		 * 4 seconds. No overflow for 32bit calculation.
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4b69d061..7f5389c 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -907,6 +907,9 @@
 	if (of_find_property(np, "fsl,magic-packet", NULL))
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
 
+	if (of_get_property(np, "fsl,wake-on-filer", NULL))
+		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
+
 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
 
 	/* In the case of a fixed PHY, the DT node associated
@@ -1415,8 +1418,14 @@
 		goto register_fail;
 	}
 
-	device_set_wakeup_capable(&dev->dev, priv->device_flags &
-				  FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
+		priv->wol_supported |= GFAR_WOL_MAGIC;
+
+	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
+	    priv->rx_filer_enable)
+		priv->wol_supported |= GFAR_WOL_FILER_UCAST;
+
+	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
 
 	/* fill out IRQ number and name fields */
 	for (i = 0; i < priv->num_grps; i++) {
@@ -1479,15 +1488,122 @@
 
 #ifdef CONFIG_PM
 
+static void __gfar_filer_disable(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 temp;
+
+	temp = gfar_read(&regs->rctrl);
+	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
+	gfar_write(&regs->rctrl, temp);
+}
+
+static void __gfar_filer_enable(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 temp;
+
+	temp = gfar_read(&regs->rctrl);
+	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
+	gfar_write(&regs->rctrl, temp);
+}
+
+/* Filer rules implementing wol capabilities */
+static void gfar_filer_config_wol(struct gfar_private *priv)
+{
+	unsigned int i;
+	u32 rqfcr;
+
+	__gfar_filer_disable(priv);
+
+	/* clear the filer table, reject any packet by default */
+	rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
+	for (i = 0; i <= MAX_FILER_IDX; i++)
+		gfar_write_filer(priv, i, rqfcr, 0);
+
+	i = 0;
+	if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
+		/* unicast packet, accept it */
+		struct net_device *ndev = priv->ndev;
+		/* get the default rx queue index */
+		u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
+		u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
+				    (ndev->dev_addr[1] << 8) |
+				     ndev->dev_addr[2];
+
+		rqfcr = (qindex << 10) | RQFCR_AND |
+			RQFCR_CMP_EXACT | RQFCR_PID_DAH;
+
+		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
+
+		dest_mac_addr = (ndev->dev_addr[3] << 16) |
+				(ndev->dev_addr[4] << 8) |
+				 ndev->dev_addr[5];
+		rqfcr = (qindex << 10) | RQFCR_GPI |
+			RQFCR_CMP_EXACT | RQFCR_PID_DAL;
+		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
+	}
+
+	__gfar_filer_enable(priv);
+}
+
+static void gfar_filer_restore_table(struct gfar_private *priv)
+{
+	u32 rqfcr, rqfpr;
+	unsigned int i;
+
+	__gfar_filer_disable(priv);
+
+	for (i = 0; i <= MAX_FILER_IDX; i++) {
+		rqfcr = priv->ftp_rqfcr[i];
+		rqfpr = priv->ftp_rqfpr[i];
+		gfar_write_filer(priv, i, rqfcr, rqfpr);
+	}
+
+	__gfar_filer_enable(priv);
+}
+
+/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
+static void gfar_start_wol_filer(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 tempval;
+	int i = 0;
+
+	/* Enable Rx hw queues */
+	gfar_write(&regs->rqueue, priv->rqueue);
+
+	/* Initialize DMACTRL to have WWR and WOP */
+	tempval = gfar_read(&regs->dmactrl);
+	tempval |= DMACTRL_INIT_SETTINGS;
+	gfar_write(&regs->dmactrl, tempval);
+
+	/* Make sure we aren't stopped */
+	tempval = gfar_read(&regs->dmactrl);
+	tempval &= ~DMACTRL_GRS;
+	gfar_write(&regs->dmactrl, tempval);
+
+	for (i = 0; i < priv->num_grps; i++) {
+		regs = priv->gfargrp[i].regs;
+		/* Clear RHLT, so that the DMA starts polling now */
+		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+		/* enable the Filer General Purpose Interrupt */
+		gfar_write(&regs->imask, IMASK_FGPI);
+	}
+
+	/* Enable Rx DMA */
+	tempval = gfar_read(&regs->maccfg1);
+	tempval |= MACCFG1_RX_EN;
+	gfar_write(&regs->maccfg1, tempval);
+}
+
 static int gfar_suspend(struct device *dev)
 {
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
-	int magic_packet = priv->wol_en &&
-			   (priv->device_flags &
-			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+	u16 wol = priv->wol_opts;
 
 	if (!netif_running(ndev))
 		return 0;
@@ -1499,7 +1615,7 @@
 
 	gfar_halt(priv);
 
-	if (magic_packet) {
+	if (wol & GFAR_WOL_MAGIC) {
 		/* Enable interrupt on Magic Packet */
 		gfar_write(&regs->imask, IMASK_MAG);
 
@@ -1513,6 +1629,10 @@
 		tempval |= MACCFG1_RX_EN;
 		gfar_write(&regs->maccfg1, tempval);
 
+	} else if (wol & GFAR_WOL_FILER_UCAST) {
+		gfar_filer_config_wol(priv);
+		gfar_start_wol_filer(priv);
+
 	} else {
 		phy_stop(priv->phydev);
 	}
@@ -1526,18 +1646,22 @@
 	struct net_device *ndev = priv->ndev;
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
-	int magic_packet = priv->wol_en &&
-			   (priv->device_flags &
-			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+	u16 wol = priv->wol_opts;
 
 	if (!netif_running(ndev))
 		return 0;
 
-	if (magic_packet) {
+	if (wol & GFAR_WOL_MAGIC) {
 		/* Disable Magic Packet mode */
 		tempval = gfar_read(&regs->maccfg2);
 		tempval &= ~MACCFG2_MPEN;
 		gfar_write(&regs->maccfg2, tempval);
+
+	} else if (wol & GFAR_WOL_FILER_UCAST) {
+		/* need to stop rx only, tx is already down */
+		gfar_halt(priv);
+		gfar_filer_restore_table(priv);
+
 	} else {
 		phy_start(priv->phydev);
 	}
@@ -1710,8 +1834,10 @@
 	 * everything for us?  Resetting it takes the link down and requires
 	 * several seconds for it to come back.
 	 */
-	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
+	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
+		put_device(&tbiphy->dev);
 		return;
+	}
 
 	/* Single clk mode, mii mode off(for serdes communication) */
 	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
@@ -1723,6 +1849,8 @@
 	phy_write(tbiphy, MII_BMCR,
 		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
 		  BMCR_SPEED1000);
+
+	put_device(&tbiphy->dev);
 }
 
 static int __gfar_is_rx_idle(struct gfar_private *priv)
@@ -1970,8 +2098,7 @@
 		/* Install our interrupt handlers for Error,
 		 * Transmit, and Receive
 		 */
-		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
-				  IRQF_NO_SUSPEND,
+		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
 				  gfar_irq(grp, ER)->name, grp);
 		if (err < 0) {
 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -1979,6 +2106,8 @@
 
 			goto err_irq_fail;
 		}
+		enable_irq_wake(gfar_irq(grp, ER)->irq);
+
 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
 				  gfar_irq(grp, TX)->name, grp);
 		if (err < 0) {
@@ -1993,15 +2122,17 @@
 				  gfar_irq(grp, RX)->irq);
 			goto rx_irq_fail;
 		}
+		enable_irq_wake(gfar_irq(grp, RX)->irq);
+
 	} else {
-		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
-				  IRQF_NO_SUSPEND,
+		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
 				  gfar_irq(grp, TX)->name, grp);
 		if (err < 0) {
 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
 				  gfar_irq(grp, TX)->irq);
 			goto err_irq_fail;
 		}
+		enable_irq_wake(gfar_irq(grp, TX)->irq);
 	}
 
 	return 0;
@@ -2738,7 +2869,14 @@
 {
 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
 	unsigned long flags;
-	u32 imask;
+	u32 imask, ievent;
+
+	ievent = gfar_read(&grp->regs->ievent);
+
+	if (unlikely(ievent & IEVENT_FGPI)) {
+		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
+		return IRQ_HANDLED;
+	}
 
 	if (likely(napi_schedule_prep(&grp->napi_rx))) {
 		spin_lock_irqsave(&grp->grplock, flags);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 8c19948..f266b20 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -340,6 +340,7 @@
 #define IEVENT_MAG		0x00000800
 #define IEVENT_GRSC		0x00000100
 #define IEVENT_RXF0		0x00000080
+#define IEVENT_FGPI		0x00000010
 #define IEVENT_FIR		0x00000008
 #define IEVENT_FIQ		0x00000004
 #define IEVENT_DPE		0x00000002
@@ -372,6 +373,7 @@
 #define IMASK_MAG		0x00000800
 #define IMASK_GRSC              0x00000100
 #define IMASK_RXFEN0		0x00000080
+#define IMASK_FGPI		0x00000010
 #define IMASK_FIR		0x00000008
 #define IMASK_FIQ		0x00000004
 #define IMASK_DPE		0x00000002
@@ -540,6 +542,9 @@
 
 #define GFAR_INT_NAME_MAX	(IFNAMSIZ + 6)	/* '_g#_xx' */
 
+#define GFAR_WOL_MAGIC		0x00000001
+#define GFAR_WOL_FILER_UCAST	0x00000002
+
 struct txbd8
 {
 	union {
@@ -917,6 +922,7 @@
 #define FSL_GIANFAR_DEV_HAS_BD_STASHING		0x00000200
 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING	0x00000400
 #define FSL_GIANFAR_DEV_HAS_TIMER		0x00000800
+#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER	0x00001000
 
 #if (MAXGROUPS == 2)
 #define DEFAULT_MAPPING 	0xAA
@@ -1161,8 +1167,6 @@
 		extended_hash:1,
 		bd_stash_en:1,
 		rx_filer_enable:1,
-		/* Wake-on-LAN enabled */
-		wol_en:1,
 		/* Enable priorty based Tx scheduling in Hw */
 		prio_sched_en:1,
 		/* Flow control flags */
@@ -1191,6 +1195,10 @@
 	u32 __iomem *hash_regs[16];
 	int hash_width;
 
+	/* wake-on-lan settings */
+	u16 wol_opts;
+	u16 wol_supported;
+
 	/*Filer table*/
 	unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
 	unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 6bdc891..fb7f8d6 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -644,28 +644,49 @@
 {
 	struct gfar_private *priv = netdev_priv(dev);
 
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
-		wol->supported = WAKE_MAGIC;
-		wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
-	} else {
-		wol->supported = wol->wolopts = 0;
-	}
+	wol->supported = 0;
+	wol->wolopts = 0;
+
+	if (priv->wol_supported & GFAR_WOL_MAGIC)
+		wol->supported |= WAKE_MAGIC;
+
+	if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
+		wol->supported |= WAKE_UCAST;
+
+	if (priv->wol_opts & GFAR_WOL_MAGIC)
+		wol->wolopts |= WAKE_MAGIC;
+
+	if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
+		wol->wolopts |= WAKE_UCAST;
 }
 
 static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	u16 wol_opts = 0;
+	int err;
 
-	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
-	    wol->wolopts != 0)
+	if (!priv->wol_supported && wol->wolopts)
 		return -EINVAL;
 
-	if (wol->wolopts & ~WAKE_MAGIC)
+	if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
 		return -EINVAL;
 
-	device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+	if (wol->wolopts & WAKE_MAGIC) {
+		wol_opts |= GFAR_WOL_MAGIC;
+	} else {
+		if (wol->wolopts & WAKE_UCAST)
+			wol_opts |= GFAR_WOL_FILER_UCAST;
+	}
 
-	priv->wol_en = !!device_may_wakeup(&dev->dev);
+	wol_opts &= priv->wol_supported;
+	priv->wol_opts = 0;
+
+	err = device_set_wakeup_enable(priv->dev, wol_opts);
+	if (err)
+		return err;
+
+	priv->wol_opts = wol_opts;
 
 	return 0;
 }
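
For illustration only: gfar_set_wol() above maps the generic ethtool WAKE_* bits onto the driver's internal GFAR_WOL_* options, with magic-packet wake-up taking precedence over the filer-based unicast wake-up. A hedged sketch of that mapping; the DEMO_* constants and helper are hypothetical stand-ins for the driver's flags.

#include <linux/ethtool.h>
#include <linux/types.h>

#define DEMO_WOL_MAGIC		0x0001	/* mirrors GFAR_WOL_MAGIC */
#define DEMO_WOL_FILER_UCAST	0x0002	/* mirrors GFAR_WOL_FILER_UCAST */

/* translate ethtool wolopts into driver-internal wake-up options */
static u16 demo_map_wolopts(u32 wolopts)
{
	if (wolopts & WAKE_MAGIC)
		return DEMO_WOL_MAGIC;
	if (wolopts & WAKE_UCAST)
		return DEMO_WOL_FILER_UCAST;
	return 0;
}
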
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 8e3cd77..664d0c2 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -557,6 +557,7 @@
 	{ .compatible = "fsl,etsec-ptp" },
 	{},
 };
+MODULE_DEVICE_TABLE(of, match_table);
 
 static struct platform_driver gianfar_ptp_driver = {
 	.driver = {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 4dd40e0..650f788 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1384,6 +1384,8 @@
 		value = phy_read(tbiphy, ENET_TBI_MII_CR);
 		value &= ~0x1000;	/* Turn off autonegotiation */
 		phy_write(tbiphy, ENET_TBI_MII_CR, value);
+
+		put_device(&tbiphy->dev);
 	}
 
 	init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
@@ -1702,8 +1704,10 @@
 	 * everything for us?  Resetting it takes the link down and requires
 	 * several seconds for it to come back.
 	 */
-	if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
+	if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) {
+		put_device(&tbiphy->dev);
 		return;
+	}
 
 	/* Single clk mode, mii mode off(for serdes communication) */
 	phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
@@ -1711,6 +1715,8 @@
 	phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
 
 	phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
+
+	put_device(&tbiphy->dev);
 }
 
 /* Configure the PHY for dev.
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index dead17b..8d12b58 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -5,7 +5,7 @@
 config NET_VENDOR_HISILICON
 	bool "Hisilicon devices"
 	default y
-	depends on ARM
+	depends on ARM || ARM64
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
 
@@ -24,11 +24,42 @@
 
 config HIP04_ETH
 	tristate "HISILICON P04 Ethernet support"
-	select PHYLIB
 	select MARVELL_PHY
 	select MFD_SYSCON
+	select HNS_MDIO
 	---help---
 	  If you wish to compile a kernel for a hardware with hisilicon p04 SoC and
 	  want to use the internal ethernet then you should answer Y to this.
 
+config HNS_MDIO
+	tristate
+	select PHYLIB
+	---help---
+	  This selects the HNS MDIO support. It is needed by HNS_DSAF to access
+	  the PHY.
+
+config HNS
+	tristate "Hisilicon Network Subsystem Support (Framework)"
+	---help---
+	  This selects the framework support for Hisilicon Network Subsystem. It
+	  is needed by any driver which provides an HNS acceleration engine or
+	  makes use of the engine.
+
+config HNS_DSAF
+	tristate "Hisilicon HNS DSAF device Support"
+	select HNS
+	select HNS_MDIO
+	---help---
+	  This selects the DSAF (Distributed System Area Fabric) network
+	  acceleration engine support. The engine is used in Hisilicon hip05,
+	  Hi1610 and further ICT SoCs.
+
+config HNS_ENET
+	tristate "Hisilicon HNS Ethernet Device Support"
+	select PHYLIB
+	select HNS
+	---help---
+	  This selects the general ethernet driver for HNS.  This module makes
+	  use of any HNS AE driver, such as HNS_DSAF.
+
 endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 6c14540..390b71f 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -3,4 +3,6 @@
 #
 
 obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
-obj-$(CONFIG_HIP04_ETH) += hip04_mdio.o hip04_eth.o
+obj-$(CONFIG_HIP04_ETH) += hip04_eth.o
+obj-$(CONFIG_HNS_MDIO) += hns_mdio.o
+obj-$(CONFIG_HNS) += hns/
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index cc2d8b4..253f8ed 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -816,7 +816,7 @@
 	struct net_device *ndev;
 	struct hip04_priv *priv;
 	struct resource *res;
-	unsigned int irq;
+	int irq;
 	int ret;
 
 	ndev = alloc_etherdev(sizeof(struct hip04_priv));
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
deleted file mode 100644
index fca0a5b..0000000
--- a/drivers/net/ethernet/hisilicon/hip04_mdio.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/* Copyright (c) 2014 Linaro Ltd.
- * Copyright (c) 2014 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/of_mdio.h>
-#include <linux/delay.h>
-
-#define MDIO_CMD_REG		0x0
-#define MDIO_ADDR_REG		0x4
-#define MDIO_WDATA_REG		0x8
-#define MDIO_RDATA_REG		0xc
-#define MDIO_STA_REG		0x10
-
-#define MDIO_START		BIT(14)
-#define MDIO_R_VALID		BIT(1)
-#define MDIO_READ	        (BIT(12) | BIT(11) | MDIO_START)
-#define MDIO_WRITE	        (BIT(12) | BIT(10) | MDIO_START)
-
-struct hip04_mdio_priv {
-	void __iomem *base;
-};
-
-#define WAIT_TIMEOUT 10
-static int hip04_mdio_wait_ready(struct mii_bus *bus)
-{
-	struct hip04_mdio_priv *priv = bus->priv;
-	int i;
-
-	for (i = 0; readl_relaxed(priv->base + MDIO_CMD_REG) & MDIO_START; i++) {
-		if (i == WAIT_TIMEOUT)
-			return -ETIMEDOUT;
-		msleep(20);
-	}
-
-	return 0;
-}
-
-static int hip04_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
-	struct hip04_mdio_priv *priv = bus->priv;
-	u32 val;
-	int ret;
-
-	ret = hip04_mdio_wait_ready(bus);
-	if (ret < 0)
-		goto out;
-
-	val = regnum | (mii_id << 5) | MDIO_READ;
-	writel_relaxed(val, priv->base + MDIO_CMD_REG);
-
-	ret = hip04_mdio_wait_ready(bus);
-	if (ret < 0)
-		goto out;
-
-	val = readl_relaxed(priv->base + MDIO_STA_REG);
-	if (val & MDIO_R_VALID) {
-		dev_err(bus->parent, "SMI bus read not valid\n");
-		ret = -ENODEV;
-		goto out;
-	}
-
-	val = readl_relaxed(priv->base + MDIO_RDATA_REG);
-	ret = val & 0xFFFF;
-out:
-	return ret;
-}
-
-static int hip04_mdio_write(struct mii_bus *bus, int mii_id,
-			    int regnum, u16 value)
-{
-	struct hip04_mdio_priv *priv = bus->priv;
-	u32 val;
-	int ret;
-
-	ret = hip04_mdio_wait_ready(bus);
-	if (ret < 0)
-		goto out;
-
-	writel_relaxed(value, priv->base + MDIO_WDATA_REG);
-	val = regnum | (mii_id << 5) | MDIO_WRITE;
-	writel_relaxed(val, priv->base + MDIO_CMD_REG);
-out:
-	return ret;
-}
-
-static int hip04_mdio_reset(struct mii_bus *bus)
-{
-	int temp, i;
-
-	for (i = 0; i < PHY_MAX_ADDR; i++) {
-		hip04_mdio_write(bus, i, 22, 0);
-		temp = hip04_mdio_read(bus, i, MII_BMCR);
-		if (temp < 0)
-			continue;
-
-		temp |= BMCR_RESET;
-		if (hip04_mdio_write(bus, i, MII_BMCR, temp) < 0)
-			continue;
-	}
-
-	mdelay(500);
-	return 0;
-}
-
-static int hip04_mdio_probe(struct platform_device *pdev)
-{
-	struct resource *r;
-	struct mii_bus *bus;
-	struct hip04_mdio_priv *priv;
-	int ret;
-
-	bus = mdiobus_alloc_size(sizeof(struct hip04_mdio_priv));
-	if (!bus) {
-		dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
-		return -ENOMEM;
-	}
-
-	bus->name = "hip04_mdio_bus";
-	bus->read = hip04_mdio_read;
-	bus->write = hip04_mdio_write;
-	bus->reset = hip04_mdio_reset;
-	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
-	bus->parent = &pdev->dev;
-	priv = bus->priv;
-
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->base = devm_ioremap_resource(&pdev->dev, r);
-	if (IS_ERR(priv->base)) {
-		ret = PTR_ERR(priv->base);
-		goto out_mdio;
-	}
-
-	ret = of_mdiobus_register(bus, pdev->dev.of_node);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
-		goto out_mdio;
-	}
-
-	platform_set_drvdata(pdev, bus);
-
-	return 0;
-
-out_mdio:
-	mdiobus_free(bus);
-	return ret;
-}
-
-static int hip04_mdio_remove(struct platform_device *pdev)
-{
-	struct mii_bus *bus = platform_get_drvdata(pdev);
-
-	mdiobus_unregister(bus);
-	mdiobus_free(bus);
-
-	return 0;
-}
-
-static const struct of_device_id hip04_mdio_match[] = {
-	{ .compatible = "hisilicon,hip04-mdio" },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, hip04_mdio_match);
-
-static struct platform_driver hip04_mdio_driver = {
-	.probe = hip04_mdio_probe,
-	.remove = hip04_mdio_remove,
-	.driver = {
-		.name = "hip04-mdio",
-		.of_match_table = hip04_mdio_match,
-	},
-};
-
-module_platform_driver(hip04_mdio_driver);
-
-MODULE_DESCRIPTION("HISILICON P04 MDIO interface driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:hip04-mdio");
diff --git a/drivers/net/ethernet/hisilicon/hns/Makefile b/drivers/net/ethernet/hisilicon/hns/Makefile
new file mode 100644
index 0000000..6010c83
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+obj-$(CONFIG_HNS) += hnae.o
+
+obj-$(CONFIG_HNS_DSAF) += hns_dsaf.o
+hns_dsaf-objs = hns_ae_adapt.o hns_dsaf_gmac.o hns_dsaf_mac.o hns_dsaf_misc.o \
+	hns_dsaf_main.o hns_dsaf_ppe.o hns_dsaf_rcb.o hns_dsaf_xgmac.o
+
+obj-$(CONFIG_HNS_ENET) += hns_enet_drv.o
+hns_enet_drv-objs = hns_enet.o hns_ethtool.o
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
new file mode 100644
index 0000000..f52e99a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#include "hnae.h"
+
+#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
+
+static struct class *hnae_class;
+
+static void
+hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+	list_add_tail_rcu(node, head);
+	spin_unlock_irqrestore(lock, flags);
+}
+
+static void hnae_list_del(spinlock_t *lock, struct list_head *node)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+	list_del_rcu(node);
+	spin_unlock_irqrestore(lock, flags);
+}
+
+static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+	unsigned int order = hnae_page_order(ring);
+	struct page *p = dev_alloc_pages(order);
+
+	if (!p)
+		return -ENOMEM;
+
+	cb->priv = p;
+	cb->page_offset = 0;
+	cb->reuse_flag = 0;
+	cb->buf  = page_address(p);
+	cb->length = hnae_page_size(ring);
+	cb->type = DESC_TYPE_PAGE;
+
+	return 0;
+}
+
+static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+	if (cb->type == DESC_TYPE_SKB)
+		dev_kfree_skb_any((struct sk_buff *)cb->priv);
+	else if (unlikely(is_rx_ring(ring)))
+		put_page((struct page *)cb->priv);
+	memset(cb, 0, sizeof(*cb));
+}
+
+static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
+			       cb->length, ring_to_dma_dir(ring));
+
+	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
+		return -EIO;
+
+	return 0;
+}
+
+static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+	if (cb->type == DESC_TYPE_SKB)
+		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
+				 ring_to_dma_dir(ring));
+	else
+		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
+			       ring_to_dma_dir(ring));
+}
+
+static struct hnae_buf_ops hnae_bops = {
+	.alloc_buffer = hnae_alloc_buffer,
+	.free_buffer = hnae_free_buffer,
+	.map_buffer = hnae_map_buffer,
+	.unmap_buffer = hnae_unmap_buffer,
+};
+
+static int __ae_match(struct device *dev, const void *data)
+{
+	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
+	const char *ae_id = data;
+
+	if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE))
+		return 1;
+
+	return 0;
+}
+
+static struct hnae_ae_dev *find_ae(const char *ae_id)
+{
+	struct device *dev;
+
+	WARN_ON(!ae_id);
+
+	dev = class_find_device(hnae_class, NULL, ae_id, __ae_match);
+
+	return dev ? cls_to_ae_dev(dev) : NULL;
+}
+
+static void hnae_free_buffers(struct hnae_ring *ring)
+{
+	int i;
+
+	for (i = 0; i < ring->desc_num; i++)
+		hnae_free_buffer_detach(ring, i);
+}
+
+/* Allocate memory for raw pkg, and map with dma */
+static int hnae_alloc_buffers(struct hnae_ring *ring)
+{
+	int i, j, ret;
+
+	for (i = 0; i < ring->desc_num; i++) {
+		ret = hnae_alloc_buffer_attach(ring, i);
+		if (ret)
+			goto out_buffer_fail;
+	}
+
+	return 0;
+
+out_buffer_fail:
+	for (j = i - 1; j >= 0; j--)
+		hnae_free_buffer_detach(ring, j);
+	return ret;
+}
+
+/* free desc along with its attached buffer */
+static void hnae_free_desc(struct hnae_ring *ring)
+{
+	hnae_free_buffers(ring);
+	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
+			 ring->desc_num * sizeof(ring->desc[0]),
+			 ring_to_dma_dir(ring));
+	ring->desc_dma_addr = 0;
+	kfree(ring->desc);
+	ring->desc = NULL;
+}
+
+/* alloc desc, without buffer attached */
+static int hnae_alloc_desc(struct hnae_ring *ring)
+{
+	int size = ring->desc_num * sizeof(ring->desc[0]);
+
+	ring->desc = kzalloc(size, GFP_KERNEL);
+	if (!ring->desc)
+		return -ENOMEM;
+
+	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
+		ring->desc, size, ring_to_dma_dir(ring));
+	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
+		ring->desc_dma_addr = 0;
+		kfree(ring->desc);
+		ring->desc = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* fini ring, also free the buffer for the ring */
+static void hnae_fini_ring(struct hnae_ring *ring)
+{
+	hnae_free_desc(ring);
+	kfree(ring->desc_cb);
+	ring->desc_cb = NULL;
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+}
+
+/* init ring, and with buffer for rx ring */
+static int
+hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
+{
+	int ret;
+
+	if (ring->desc_num <= 0 || ring->buf_size <= 0)
+		return -EINVAL;
+
+	ring->q = q;
+	ring->flags = flags;
+	assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
+
+	/* no matter whether it is a tx or rx ring, ntu and ntc start from 0 */
+	assert(ring->next_to_use == 0);
+	assert(ring->next_to_clean == 0);
+
+	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
+			GFP_KERNEL);
+	if (!ring->desc_cb) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = hnae_alloc_desc(ring);
+	if (ret)
+		goto out_with_desc_cb;
+
+	if (is_rx_ring(ring)) {
+		ret = hnae_alloc_buffers(ring);
+		if (ret)
+			goto out_with_desc;
+	}
+
+	return 0;
+
+out_with_desc:
+	hnae_free_desc(ring);
+out_with_desc_cb:
+	kfree(ring->desc_cb);
+	ring->desc_cb = NULL;
+out:
+	return ret;
+}
+
+static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
+			   struct hnae_ae_dev *dev)
+{
+	int ret;
+
+	q->dev = dev;
+	q->handle = h;
+
+	ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
+	if (ret)
+		goto out;
+
+	ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
+	if (ret)
+		goto out_with_tx_ring;
+
+	if (dev->ops->init_queue)
+		dev->ops->init_queue(q);
+
+	return 0;
+
+out_with_tx_ring:
+	hnae_fini_ring(&q->tx_ring);
+out:
+	return ret;
+}
+
+static void hnae_fini_queue(struct hnae_queue *q)
+{
+	if (q->dev->ops->fini_queue)
+		q->dev->ops->fini_queue(q);
+
+	hnae_fini_ring(&q->tx_ring);
+	hnae_fini_ring(&q->rx_ring);
+}
+
+/**
+ * ae_chain - define ae chain head
+ */
+static RAW_NOTIFIER_HEAD(ae_chain);
+
+int hnae_register_notifier(struct notifier_block *nb)
+{
+	return raw_notifier_chain_register(&ae_chain, nb);
+}
+EXPORT_SYMBOL(hnae_register_notifier);
+
+void hnae_unregister_notifier(struct notifier_block *nb)
+{
+	if (raw_notifier_chain_unregister(&ae_chain, nb))
+		dev_err(NULL, "notifier chain unregister fail\n");
+}
+EXPORT_SYMBOL(hnae_unregister_notifier);
+
+int hnae_reinit_handle(struct hnae_handle *handle)
+{
+	int i, j;
+	int ret;
+
+	for (i = 0; i < handle->q_num; i++) /* free ring*/
+		hnae_fini_queue(handle->qs[i]);
+
+	if (handle->dev->ops->reset)
+		handle->dev->ops->reset(handle);
+
+	for (i = 0; i < handle->q_num; i++) {/* reinit ring*/
+		ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
+		if (ret)
+			goto out_when_init_queue;
+	}
+	return 0;
+out_when_init_queue:
+	for (j = i - 1; j >= 0; j--)
+		hnae_fini_queue(handle->qs[j]);
+	return ret;
+}
+EXPORT_SYMBOL(hnae_reinit_handle);
+
+/* hnae_get_handle - get a handle from the AE
+ * @owner_dev: the dev that uses this handle
+ * @ae_id: the id of the ae to be used
+ * @port_id: the port id on the ae for this handle
+ * @bops: the callbacks for buffer management
+ *
+ * return handle ptr or ERR_PTR
+ */
+struct hnae_handle *hnae_get_handle(struct device *owner_dev,
+				    const char *ae_id, u32 port_id,
+				    struct hnae_buf_ops *bops)
+{
+	struct hnae_ae_dev *dev;
+	struct hnae_handle *handle;
+	int i, j;
+	int ret;
+
+	dev = find_ae(ae_id);
+	if (!dev)
+		return ERR_PTR(-ENODEV);
+
+	handle = dev->ops->get_handle(dev, port_id);
+	if (IS_ERR(handle))
+		return handle;
+
+	handle->dev = dev;
+	handle->owner_dev = owner_dev;
+	handle->bops = bops ? bops : &hnae_bops;
+	handle->eport_id = port_id;
+
+	for (i = 0; i < handle->q_num; i++) {
+		ret = hnae_init_queue(handle, handle->qs[i], dev);
+		if (ret)
+			goto out_when_init_queue;
+	}
+
+	__module_get(dev->owner);
+
+	hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);
+
+	return handle;
+
+out_when_init_queue:
+	for (j = i - 1; j >= 0; j--)
+		hnae_fini_queue(handle->qs[j]);
+
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(hnae_get_handle);
+
+void hnae_put_handle(struct hnae_handle *h)
+{
+	struct hnae_ae_dev *dev = h->dev;
+	int i;
+
+	for (i = 0; i < h->q_num; i++)
+		hnae_fini_queue(h->qs[i]);
+
+	if (h->dev->ops->reset)
+		h->dev->ops->reset(h);
+
+	hnae_list_del(&dev->lock, &h->node);
+
+	if (dev->ops->put_handle)
+		dev->ops->put_handle(h);
+
+	module_put(dev->owner);
+}
+EXPORT_SYMBOL(hnae_put_handle);
+
+static void hnae_release(struct device *dev)
+{
+}
+
+/**
+ * hnae_ae_register - register an AE engine with the hnae framework
+ * @hdev: the hnae ae engine device
+ * @owner: the module that provides this dev
+ * NOTE: duplicate names are not checked
+ */
+int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
+{
+	static atomic_t id = ATOMIC_INIT(-1);
+	int ret;
+
+	if (!hdev->dev)
+		return -ENODEV;
+
+	if (!hdev->ops || !hdev->ops->get_handle ||
+	    !hdev->ops->toggle_ring_irq ||
+	    !hdev->ops->toggle_queue_status ||
+	    !hdev->ops->get_status || !hdev->ops->adjust_link)
+		return -EINVAL;
+
+	hdev->owner = owner;
+	hdev->id = (int)atomic_inc_return(&id);
+	hdev->cls_dev.parent = hdev->dev;
+	hdev->cls_dev.class = hnae_class;
+	hdev->cls_dev.release = hnae_release;
+	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
+	ret = device_register(&hdev->cls_dev);
+	if (ret)
+		return ret;
+
+	__module_get(THIS_MODULE);
+
+	INIT_LIST_HEAD(&hdev->handle_list);
+	spin_lock_init(&hdev->lock);
+
+	ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
+	if (ret)
+		dev_dbg(hdev->dev,
+			"has not notifier for AE: %s\n", hdev->name);
+
+	return 0;
+}
+EXPORT_SYMBOL(hnae_ae_register);
+
+/**
+ * hnae_ae_unregister - unregisters a HNAE AE engine
+ * @hdev: the device to unregister
+ */
+void hnae_ae_unregister(struct hnae_ae_dev *hdev)
+{
+	device_unregister(&hdev->cls_dev);
+	module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL(hnae_ae_unregister);
+
+static ssize_t handles_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	ssize_t s = 0;
+	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
+	struct hnae_handle *h;
+	int i = 0, j;
+
+	list_for_each_entry_rcu(h, &hdev->handle_list, node) {
+		s += sprintf(buf + s, "handle %d (eport_id=%u from %s):\n",
+			    i++, h->eport_id, h->dev->name);
+		for (j = 0; j < h->q_num; j++) {
+			s += sprintf(buf + s, "\tqueue[%d] on %p\n",
+				     j, h->qs[j]->io_base);
+#define HANDEL_TX_MSG "\t\ttx_ring on %p:%u,%u,%u,%u,%u,%llu,%llu\n"
+			s += sprintf(buf + s,
+				     HANDEL_TX_MSG,
+				     h->qs[j]->tx_ring.io_base,
+				     h->qs[j]->tx_ring.buf_size,
+				     h->qs[j]->tx_ring.desc_num,
+				     h->qs[j]->tx_ring.max_desc_num_per_pkt,
+				     h->qs[j]->tx_ring.max_raw_data_sz_per_desc,
+				     h->qs[j]->tx_ring.max_pkt_size,
+				     h->qs[j]->tx_ring.stats.sw_err_cnt,
+				     h->qs[j]->tx_ring.stats.io_err_cnt);
+			s += sprintf(buf + s,
+				"\t\trx_ring on %p:%u,%u,%llu,%llu,%llu\n",
+				h->qs[j]->rx_ring.io_base,
+				h->qs[j]->rx_ring.buf_size,
+				h->qs[j]->rx_ring.desc_num,
+				h->qs[j]->rx_ring.stats.sw_err_cnt,
+				h->qs[j]->rx_ring.stats.io_err_cnt,
+				h->qs[j]->rx_ring.stats.seg_pkt_cnt);
+		}
+	}
+
+	return s;
+}
+
+static DEVICE_ATTR_RO(handles);
+static struct attribute *hnae_class_attrs[] = {
+	&dev_attr_handles.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(hnae_class);
+
+static int __init hnae_init(void)
+{
+	hnae_class = class_create(THIS_MODULE, "hnae");
+	if (IS_ERR(hnae_class))
+		return PTR_ERR(hnae_class);
+
+	hnae_class->dev_groups = hnae_class_groups;
+	return 0;
+}
+
+static void __exit hnae_exit(void)
+{
+	class_destroy(hnae_class);
+}
+
+subsys_initcall(hnae_init);
+module_exit(hnae_exit);
+
+MODULE_AUTHOR("Hisilicon, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");
+
+/* vi: set tw=78 noet: */
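
For illustration only: hnae_ae_register() above publishes an HNAE_AE_REGISTER event on the ae_chain raw notifier, so an ENET-side consumer can react when a new acceleration engine appears. A minimal hedged sketch of such a consumer; the callback body and demo_* names are hypothetical, only the notifier API and HNAE_AE_REGISTER come from this patch.

#include <linux/notifier.h>
#include <linux/printk.h>
#include "hnae.h"	/* hnae_register_notifier(), HNAE_AE_REGISTER */

static int demo_hnae_event(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	if (action == HNAE_AE_REGISTER)	/* a new AE device was registered */
		pr_info("hnae: acceleration engine registered\n");

	return NOTIFY_OK;
}

static struct notifier_block demo_hnae_nb = {
	.notifier_call = demo_hnae_event,
};

/* a consumer would typically do, from its probe/remove paths:
 *	hnae_register_notifier(&demo_hnae_nb);
 *	...
 *	hnae_unregister_notifier(&demo_hnae_nb);
 */
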
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
new file mode 100644
index 0000000..d4a1eb1
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -0,0 +1,584 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HNAE_H
+#define __HNAE_H
+
+/* Names used in this framework:
+ *      ae handle (handle):
+ *        a set of queues provided by AE
+ *      ring buffer queue (rbq):
+ *        the channel between upper layer and the AE, can do tx and rx
+ *      ring:
+ *        a tx or rx channel within a rbq
+ *      ring description (desc):
+ *        an element in the ring with packet information
+ *      buffer:
+ *        a memory region referred by desc with the full packet payload
+ *
+ * "num" means a static number set as a parameter, "count" mean a dynamic
+ *   number set while running
+ * "cb" means control block
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/phy.h>
+#include <linux/types.h>
+
+#define HNAE_DRIVER_VERSION "1.3.0"
+#define HNAE_DRIVER_NAME "hns"
+#define HNAE_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
+#define HNAE_DRIVER_STRING "Hisilicon Network Subsystem Driver"
+#define HNAE_DEFAULT_DEVICE_DESCR "Hisilicon Network Subsystem"
+
+#ifdef DEBUG
+
+#ifndef assert
+#define assert(expr) \
+do { \
+	if (!(expr)) { \
+		pr_err("Assertion failed! %s, %s, %s, line %d\n", \
+			   #expr, __FILE__, __func__, __LINE__); \
+	} \
+} while (0)
+#endif
+
+#else
+
+#ifndef assert
+#define assert(expr)
+#endif
+
+#endif
+
+#define AE_VERSION_1 ('6' << 16 | '6' << 8 | '0')
+#define AE_VERSION_2 ('1' << 24 | '6' << 16 | '1' << 8 | '0')
+#define AE_NAME_SIZE 16
+
+/* The RX and TX RCB descriptor formats may diverge in the future, but they
+ * are identical for now.
+ */
+#define RCB_REG_BASEADDR_L         0x00 /* P660 support only 32bit accessing */
+#define RCB_REG_BASEADDR_H         0x04
+#define RCB_REG_BD_NUM             0x08
+#define RCB_REG_BD_LEN             0x0C
+#define RCB_REG_PKTLINE            0x10
+#define RCB_REG_TAIL               0x18
+#define RCB_REG_HEAD               0x1C
+#define RCB_REG_FBDNUM             0x20
+#define RCB_REG_OFFSET             0x24 /* pkt num to be handled */
+#define RCB_REG_PKTNUM_RECORD      0x2C /* total pkt received */
+
+#define HNS_RX_HEAD_SIZE 256
+
+#define HNAE_AE_REGISTER 0x1
+
+#define RCB_RING_NAME_LEN 16
+
+enum hnae_led_state {
+	HNAE_LED_INACTIVE,
+	HNAE_LED_ACTIVE,
+	HNAE_LED_ON,
+	HNAE_LED_OFF
+};
+
+#define HNS_RX_FLAG_VLAN_PRESENT 0x1
+#define HNS_RX_FLAG_L3ID_IPV4 0x0
+#define HNS_RX_FLAG_L3ID_IPV6 0x1
+#define HNS_RX_FLAG_L4ID_UDP 0x0
+#define HNS_RX_FLAG_L4ID_TCP 0x1
+
+#define HNS_TXD_ASID_S 0
+#define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
+#define HNS_TXD_BUFNUM_S 8
+#define HNS_TXD_BUFNUM_M (0x3 << HNS_TXD_BUFNUM_S)
+#define HNS_TXD_PORTID_S 10
+#define HNS_TXD_PORTID_M (0x7 << HNS_TXD_PORTID_S)
+
+#define HNS_TXD_RA_B 8
+#define HNS_TXD_RI_B 9
+#define HNS_TXD_L4CS_B 10
+#define HNS_TXD_L3CS_B 11
+#define HNS_TXD_FE_B 12
+#define HNS_TXD_VLD_B 13
+#define HNS_TXD_IPOFFSET_S 14
+#define HNS_TXD_IPOFFSET_M (0xff << HNS_TXD_IPOFFSET_S)
+
+#define HNS_RXD_IPOFFSET_S 0
+#define HNS_RXD_IPOFFSET_M (0xff << HNS_RXD_IPOFFSET_S)
+#define HNS_RXD_BUFNUM_S 8
+#define HNS_RXD_BUFNUM_M (0x3 << HNS_RXD_BUFNUM_S)
+#define HNS_RXD_PORTID_S 10
+#define HNS_RXD_PORTID_M (0x7 << HNS_RXD_PORTID_S)
+#define HNS_RXD_DMAC_S 13
+#define HNS_RXD_DMAC_M (0x3 << HNS_RXD_DMAC_S)
+#define HNS_RXD_VLAN_S 15
+#define HNS_RXD_VLAN_M (0x3 << HNS_RXD_VLAN_S)
+#define HNS_RXD_L3ID_S 17
+#define HNS_RXD_L3ID_M (0xf << HNS_RXD_L3ID_S)
+#define HNS_RXD_L4ID_S 21
+#define HNS_RXD_L4ID_M (0xf << HNS_RXD_L4ID_S)
+#define HNS_RXD_FE_B 25
+#define HNS_RXD_FRAG_B 26
+#define HNS_RXD_VLD_B 27
+#define HNS_RXD_L2E_B 28
+#define HNS_RXD_L3E_B 29
+#define HNS_RXD_L4E_B 30
+#define HNS_RXD_DROP_B 31
+
+#define HNS_RXD_VLANID_S 8
+#define HNS_RXD_VLANID_M (0xfff << HNS_RXD_VLANID_S)
+#define HNS_RXD_CFI_B 20
+#define HNS_RXD_PRI_S 21
+#define HNS_RXD_PRI_M (0x7 << HNS_RXD_PRI_S)
+#define HNS_RXD_ASID_S 24
+#define HNS_RXD_ASID_M (0xff << HNS_RXD_ASID_S)
+
+/* hardware spec ring buffer format */
+struct __packed hnae_desc {
+	__le64 addr;
+	union {
+		struct {
+			__le16 asid_bufnum_pid;
+			__le16 send_size;
+			__le32 flag_ipoffset;
+			__le32 reserved_3[4];
+		} tx;
+
+		struct {
+			__le32 ipoff_bnum_pid_flag;
+			__le16 pkt_len;
+			__le16 size;
+			__le32 vlan_pri_asid;
+			__le32 reserved_2[3];
+		} rx;
+	};
+};
+
+struct hnae_desc_cb {
+	dma_addr_t dma; /* dma address of this desc */
+	void *buf;      /* cpu addr for a desc */
+
+	/* priv data for the desc, e.g. skb when use with ip stack*/
+	void *priv;
+	u16 page_offset;
+	u16 reuse_flag;
+
+	u16 length;     /* length of the buffer */
+
+       /* desc type, used by the ring user to mark the type of the priv data */
+	u16 type;
+};
+
+#define setflags(flags, bits) ((flags) |= (bits))
+#define unsetflags(flags, bits) ((flags) &= ~(bits))
+
+/* hnae_ring->flags fields */
+#define RINGF_DIR 0x1	    /* TX or RX ring, set if TX */
+#define is_tx_ring(ring) ((ring)->flags & RINGF_DIR)
+#define is_rx_ring(ring) (!is_tx_ring(ring))
+#define ring_to_dma_dir(ring) (is_tx_ring(ring) ? \
+	DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+struct ring_stats {
+	u64 io_err_cnt;
+	u64 sw_err_cnt;
+	u64 seg_pkt_cnt;
+	union {
+		struct {
+			u64 tx_pkts;
+			u64 tx_bytes;
+			u64 tx_err_cnt;
+			u64 restart_queue;
+			u64 tx_busy;
+		};
+		struct {
+			u64 rx_pkts;
+			u64 rx_bytes;
+			u64 rx_err_cnt;
+			u64 reuse_pg_cnt;
+			u64 err_pkt_len;
+			u64 non_vld_descs;
+			u64 err_bd_num;
+			u64 l2_err;
+			u64 l3l4_csum_err;
+		};
+	};
+};
+
+struct hnae_queue;
+
+struct hnae_ring {
+	u8 __iomem *io_base; /* base io address for the ring */
+	struct hnae_desc *desc; /* dma map address space */
+	struct hnae_desc_cb *desc_cb;
+	struct hnae_queue *q;
+	int irq;
+	char ring_name[RCB_RING_NAME_LEN];
+
+	/* statistic */
+	struct ring_stats stats;
+
+	dma_addr_t desc_dma_addr;
+	u32 buf_size;       /* size for hnae_desc->addr, preset by AE */
+	u16 desc_num;       /* total number of desc */
+	u16 max_desc_num_per_pkt;
+	u16 max_raw_data_sz_per_desc;
+	u16 max_pkt_size;
+	int next_to_use;    /* idx of next spare desc */
+
+	/* idx of latest sent desc, the ring is empty when equal to
+	 * next_to_use
+	 */
+	int next_to_clean;
+
+	int flags;          /* ring attribute */
+	int irq_init_flag;
+};
+
+#define ring_ptr_move_fw(ring, p) \
+	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
+#define ring_ptr_move_bw(ring, p) \
+	((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
+
+enum hns_desc_type {
+	DESC_TYPE_SKB,
+	DESC_TYPE_PAGE,
+};
+
+#define assert_is_ring_idx(ring, idx) \
+	assert((idx) >= 0 && (idx) < (ring)->desc_num)
+
+/* the distance between [begin, end) in a ring buffer
+ * note: there is an unused slot between begin and end
+ */
+static inline int ring_dist(struct hnae_ring *ring, int begin, int end)
+{
+	assert_is_ring_idx(ring, begin);
+	assert_is_ring_idx(ring, end);
+
+	return (end - begin + ring->desc_num) % ring->desc_num;
+}
+
+static inline int ring_space(struct hnae_ring *ring)
+{
+	return ring->desc_num -
+		ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
+}
+
+static inline int is_ring_empty(struct hnae_ring *ring)
+{
+	assert_is_ring_idx(ring, ring->next_to_use);
+	assert_is_ring_idx(ring, ring->next_to_clean);
+
+	return ring->next_to_use == ring->next_to_clean;
+}
+
+#define hnae_buf_size(_ring) ((_ring)->buf_size)
+#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
+#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
+
+struct hnae_handle;
+
+/* allocate and dma map space for hnae desc */
+struct hnae_buf_ops {
+	int (*alloc_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+	void (*free_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+	int (*map_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+	void (*unmap_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+};
+
+struct hnae_queue {
+	void __iomem *io_base;
+	phys_addr_t phy_base;
+	struct hnae_ae_dev *dev;	/* the device who use this queue */
+	struct hnae_ring rx_ring, tx_ring;
+	struct hnae_handle *handle;
+};
+
+/*hnae loop mode*/
+enum hnae_loop {
+	MAC_INTERNALLOOP_MAC = 0,
+	MAC_INTERNALLOOP_SERDES,
+	MAC_INTERNALLOOP_PHY,
+	MAC_LOOP_NONE,
+};
+
+/*hnae port type*/
+enum hnae_port_type {
+	HNAE_PORT_SERVICE = 0,
+	HNAE_PORT_DEBUG
+};
+
+/* This struct defines the operation on the handle.
+ *
+ * get_handle(): (mandatory)
+ *   Get a handle from AE according to its name and options.
+ *   the AE driver should manage the space used by handle and its queues while
+ *   the HNAE framework will allocate desc and desc_cb for all rings in the
+ *   queues.
+ * put_handle():
+ *   Release the handle.
+ * start():
+ *   Enable the hardware, include all queues
+ * stop():
+ *   Disable the hardware
+ * set_opts(): (mandatory)
+ *   Set options to the AE
+ * get_opts(): (mandatory)
+ *   Get options from the AE
+ * get_status():
+ *   Get the carrier state of the back channel of the handle, 1 for ok, 0 for
+ *   non-ok
+ * toggle_ring_irq(): (mandatory)
+ *   Set the ring irq to be enabled(0) or disabled(1)
+ * toggle_queue_status(): (mandatory)
+ *   Set the queue to be enabled(1) or disabled(0); this will not change the
+ *   ring irq state
+ * adjust_link()
+ *   adjust link status
+ * set_loopback()
+ *   set loopback
+ * get_ring_bdnum_limit()
+ *   get ring bd number limit
+ * get_pauseparam()
+ *   get the tx and rx pause frame settings
+ * set_autoneg()
+ *   set autonegotiation for pause frame use
+ * get_autoneg()
+ *   get autonegotiation for pause frame use
+ * set_pauseparam()
+ *   set the tx and rx pause frame settings
+ * get_coalesce_usecs()
+ *   get usecs to delay a TX interrupt after a packet is sent
+ * get_rx_max_coalesced_frames()
+ *   get Maximum number of packets to be sent before a TX interrupt.
+ * set_coalesce_usecs()
+ *   set usecs to delay a TX interrupt after a packet is sent
+ * set_coalesce_frames()
+ *   set Maximum number of packets to be sent before a TX interrupt.
+ * get_ringnum()
+ *   get RX/TX ring number
+ * get_max_ringnum()
+ *   get RX/TX ring maximum number
+ * get_mac_addr()
+ *   get mac address
+ * set_mac_addr()
+ *   set mac address
+ * set_mc_addr()
+ *   set multicast mode
+ * set_mtu()
+ *   set mtu
+ * update_stats()
+ *   update Old network device statistics
+ * get_ethtool_stats()
+ *   get ethtool network device statistics
+ * get_strings()
+ *   get a set of strings that describe the requested objects
+ * get_sset_count()
+ *   get number of strings that @get_strings will write
+ * update_led_status()
+ *   update the led status
+ * set_led_id()
+ *   set led id
+ * get_regs()
+ *   get regs dump
+ * get_regs_len()
+ *   get the len of the regs dump
+ */
+struct hnae_ae_ops {
+	struct hnae_handle *(*get_handle)(struct hnae_ae_dev *dev,
+					  u32 port_id);
+	void (*put_handle)(struct hnae_handle *handle);
+	void (*init_queue)(struct hnae_queue *q);
+	void (*fini_queue)(struct hnae_queue *q);
+	int (*start)(struct hnae_handle *handle);
+	void (*stop)(struct hnae_handle *handle);
+	void (*reset)(struct hnae_handle *handle);
+	int (*set_opts)(struct hnae_handle *handle, int type, void *opts);
+	int (*get_opts)(struct hnae_handle *handle, int type, void **opts);
+	int (*get_status)(struct hnae_handle *handle);
+	int (*get_info)(struct hnae_handle *handle,
+			u8 *auto_neg, u16 *speed, u8 *duplex);
+	void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
+	void (*toggle_queue_status)(struct hnae_queue *queue, u32 val);
+	void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
+	int (*set_loopback)(struct hnae_handle *handle,
+			    enum hnae_loop loop_mode, int en);
+	void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
+				     u32 *uplimit);
+	void (*get_pauseparam)(struct hnae_handle *handle,
+			       u32 *auto_neg, u32 *rx_en, u32 *tx_en);
+	int (*set_autoneg)(struct hnae_handle *handle, u8 enable);
+	int (*get_autoneg)(struct hnae_handle *handle);
+	int (*set_pauseparam)(struct hnae_handle *handle,
+			      u32 auto_neg, u32 rx_en, u32 tx_en);
+	void (*get_coalesce_usecs)(struct hnae_handle *handle,
+				   u32 *tx_usecs, u32 *rx_usecs);
+	void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
+					    u32 *tx_frames, u32 *rx_frames);
+	void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
+	int (*set_coalesce_frames)(struct hnae_handle *handle,
+				   u32 coalesce_frames);
+	int (*get_mac_addr)(struct hnae_handle *handle, void **p);
+	int (*set_mac_addr)(struct hnae_handle *handle, void *p);
+	int (*set_mc_addr)(struct hnae_handle *handle, void *addr);
+	int (*set_mtu)(struct hnae_handle *handle, int new_mtu);
+	void (*update_stats)(struct hnae_handle *handle,
+			     struct net_device_stats *net_stats);
+	void (*get_stats)(struct hnae_handle *handle, u64 *data);
+	void (*get_strings)(struct hnae_handle *handle,
+			    u32 stringset, u8 *data);
+	int (*get_sset_count)(struct hnae_handle *handle, int stringset);
+	void (*update_led_status)(struct hnae_handle *handle);
+	int (*set_led_id)(struct hnae_handle *handle,
+			  enum hnae_led_state status);
+	void (*get_regs)(struct hnae_handle *handle, void *data);
+	int (*get_regs_len)(struct hnae_handle *handle);
+};
+
+struct hnae_ae_dev {
+	struct device cls_dev; /* the class dev */
+	struct device *dev; /* the presented dev */
+	struct hnae_ae_ops *ops;
+	struct list_head node;
+	struct module *owner; /* the module who provides this dev */
+	int id;
+	char name[AE_NAME_SIZE];
+	struct list_head handle_list;
+	spinlock_t lock; /* lock to protect the handle_list */
+};
+
+struct hnae_handle {
+	struct device *owner_dev; /* the device which make use of this handle */
+	struct hnae_ae_dev *dev;  /* the device who provides this handle */
+	struct device_node *phy_node;
+	phy_interface_t phy_if;
+	u32 if_support;
+	int q_num;
+	int vf_id;
+	u32 eport_id;
+	enum hnae_port_type port_type;
+	struct list_head node;    /* list to hnae_ae_dev->handle_list */
+	struct hnae_buf_ops *bops; /* operation for the buffer */
+	struct hnae_queue **qs;  /* array base of all queues */
+};
+
+#define ring_to_dev(ring) ((ring)->q->dev->dev)
+
+struct hnae_handle *hnae_get_handle(struct device *owner_dev, const char *ae_id,
+				    u32 port_id, struct hnae_buf_ops *bops);
+void hnae_put_handle(struct hnae_handle *handle);
+int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner);
+void hnae_ae_unregister(struct hnae_ae_dev *dev);
+
+int hnae_register_notifier(struct notifier_block *nb);
+void hnae_unregister_notifier(struct notifier_block *nb);
+int hnae_reinit_handle(struct hnae_handle *handle);
+
+#define hnae_queue_xmit(q, buf_num) writel_relaxed(buf_num, \
+	(q)->tx_ring.io_base + RCB_REG_TAIL)
+
+#ifndef assert
+#define assert(cond)
+#endif
+
+static inline int hnae_reserve_buffer_map(struct hnae_ring *ring,
+					  struct hnae_desc_cb *cb)
+{
+	struct hnae_buf_ops *bops = ring->q->handle->bops;
+	int ret;
+
+	ret = bops->alloc_buffer(ring, cb);
+	if (ret)
+		goto out;
+
+	ret = bops->map_buffer(ring, cb);
+	if (ret)
+		goto out_with_buf;
+
+	return 0;
+
+out_with_buf:
+	bops->free_buffer(ring, cb);
+out:
+	return ret;
+}
+
+static inline int hnae_alloc_buffer_attach(struct hnae_ring *ring, int i)
+{
+	int ret = hnae_reserve_buffer_map(ring, &ring->desc_cb[i]);
+
+	if (ret)
+		return ret;
+
+	ring->desc[i].addr = (__le64)ring->desc_cb[i].dma;
+
+	return 0;
+}
+
+static inline void hnae_buffer_detach(struct hnae_ring *ring, int i)
+{
+	ring->q->handle->bops->unmap_buffer(ring, &ring->desc_cb[i]);
+	ring->desc[i].addr = 0;
+}
+
+static inline void hnae_free_buffer_detach(struct hnae_ring *ring, int i)
+{
+	struct hnae_buf_ops *bops = ring->q->handle->bops;
+	struct hnae_desc_cb *cb = &ring->desc_cb[i];
+
+	if (!ring->desc_cb[i].dma)
+		return;
+
+	hnae_buffer_detach(ring, i);
+	bops->free_buffer(ring, cb);
+}
+
+/* detach an in-use buffer and replace it with a reserved one */
+static inline void hnae_replace_buffer(struct hnae_ring *ring, int i,
+				       struct hnae_desc_cb *res_cb)
+{
+	struct hnae_buf_ops *bops = ring->q->handle->bops;
+	struct hnae_desc_cb tmp_cb = ring->desc_cb[i];
+
+	bops->unmap_buffer(ring, &ring->desc_cb[i]);
+	ring->desc_cb[i] = *res_cb;
+	*res_cb = tmp_cb;
+	ring->desc[i].addr = (__le64)ring->desc_cb[i].dma;
+	ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
+}
+
+static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
+{
+	ring->desc_cb[i].reuse_flag = 0;
+	ring->desc[i].addr = (__le64)(ring->desc_cb[i].dma
+		+ ring->desc_cb[i].page_offset);
+	ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
+}
+
+#define hnae_set_field(origin, mask, shift, val) \
+	do { \
+		(origin) &= (~(mask)); \
+		(origin) |= ((val) << (shift)) & (mask); \
+	} while (0)
+
+#define hnae_set_bit(origin, shift, val) \
+	hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
+
+#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define hnae_get_bit(origin, shift) \
+	hnae_get_field((origin), (0x1 << (shift)), (shift))
+
+#endif
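
For illustration only: the header above describes a handle as a set of queues, each queue pairing one tx ring with one rx ring of descriptors and buffers. A hedged sketch of how an ENET-side driver would obtain a handle and walk its queues; the AE name "demo-ae" and demo_attach_to_ae() are hypothetical, only hnae_get_handle()/hnae_put_handle() and the structures come from this patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/printk.h>
#include "hnae.h"

static int demo_attach_to_ae(struct device *dev, u32 port_id)
{
	struct hnae_handle *h;
	int i;

	/* a NULL bops selects the framework's default page-based buffer ops */
	h = hnae_get_handle(dev, "demo-ae", port_id, NULL);
	if (IS_ERR(h))
		return PTR_ERR(h);

	for (i = 0; i < h->q_num; i++) {
		struct hnae_queue *q = h->qs[i];

		/* each queue pairs one tx ring with one rx ring */
		pr_debug("queue %d: %d tx descs, %d rx descs\n",
			 i, q->tx_ring.desc_num, q->rx_ring.desc_num);
	}

	hnae_put_handle(h);
	return 0;
}
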
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
new file mode 100644
index 0000000..a2c72f8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -0,0 +1,777 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include "hnae.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+
+#define AE_NAME_PORT_ID_IDX 6
+#define ETH_STATIC_REG	 1
+#define ETH_DUMP_REG	 5
+#define ETH_GSTRING_LEN	32
+
+static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
+{
+	struct  hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+	return vf_cb->mac_cb;
+}
+
+/**
+ * hns_ae_map_eport_to_dport - translate enet port id to dsaf port id
+ * @port_id: enet port id
+ *           (debug ports 0-1, service ports 2-7; dsaf mode has only 2)
+ * return: dsaf port id
+ *         (service ports 0-5, debug ports 6-7)
+ **/
+static int hns_ae_map_eport_to_dport(u32 port_id)
+{
+	int port_index;
+
+	if (port_id < DSAF_DEBUG_NW_NUM)
+		port_index = port_id + DSAF_SERVICE_PORT_NUM_PER_DSAF;
+	else
+		port_index = port_id - DSAF_DEBUG_NW_NUM;
+
+	return port_index;
+}
+
+static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
+{
+	return container_of(dev, struct dsaf_device, ae_dev);
+}
+
+static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
+{
+	int ppe_index;
+	int ppe_common_index;
+	struct ppe_common_cb *ppe_comm;
+	struct  hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+	if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
+		ppe_index = vf_cb->port_index;
+		ppe_common_index = 0;
+	} else {
+		ppe_index = 0;
+		ppe_common_index =
+			vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
+	}
+	ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index];
+	return &ppe_comm->ppe_cb[ppe_index];
+}
+
+static int hns_ae_get_q_num_per_vf(
+	struct dsaf_device *dsaf_dev, int port)
+{
+	int common_idx = hns_dsaf_get_comm_idx_by_port(port);
+
+	return dsaf_dev->rcb_common[common_idx]->max_q_per_vf;
+}
+
+static int hns_ae_get_vf_num_per_port(
+	struct dsaf_device *dsaf_dev, int port)
+{
+	int common_idx = hns_dsaf_get_comm_idx_by_port(port);
+
+	return dsaf_dev->rcb_common[common_idx]->max_vfn;
+}
+
+static struct ring_pair_cb *hns_ae_get_base_ring_pair(
+	struct dsaf_device *dsaf_dev, int port)
+{
+	int common_idx = hns_dsaf_get_comm_idx_by_port(port);
+	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[common_idx];
+	int q_num = rcb_comm->max_q_per_vf;
+	int vf_num = rcb_comm->max_vfn;
+
+	if (common_idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
+		return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
+	else
+		return &rcb_comm->ring_pair_cb[0];
+}
+
+static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
+{
+	return container_of(q, struct ring_pair_cb, q);
+}
+
+struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
+				      u32 port_id)
+{
+	int port_idx;
+	int vfnum_per_port;
+	int qnum_per_vf;
+	int i;
+	struct dsaf_device *dsaf_dev;
+	struct hnae_handle *ae_handle;
+	struct ring_pair_cb *ring_pair_cb;
+	struct hnae_vf_cb *vf_cb;
+
+	dsaf_dev = hns_ae_get_dsaf_dev(dev);
+	port_idx = hns_ae_map_eport_to_dport(port_id);
+
+	ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_idx);
+	vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_idx);
+	qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_idx);
+
+	vf_cb = kzalloc(sizeof(*vf_cb) +
+			qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
+	if (unlikely(!vf_cb)) {
+		dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
+		ae_handle = ERR_PTR(-ENOMEM);
+		goto handle_err;
+	}
+	ae_handle = &vf_cb->ae_handle;
+	/* ae_handle Init  */
+	ae_handle->owner_dev = dsaf_dev->dev;
+	ae_handle->dev = dev;
+	ae_handle->q_num = qnum_per_vf;
+
+	/* find ring pair, and set vf id*/
+	for (ae_handle->vf_id = 0;
+		ae_handle->vf_id < vfnum_per_port; ae_handle->vf_id++) {
+		if (!ring_pair_cb->used_by_vf)
+			break;
+		ring_pair_cb += qnum_per_vf;
+	}
+	if (ae_handle->vf_id >= vfnum_per_port) {
+		dev_err(dsaf_dev->dev, "malloc queue fail!\n");
+		ae_handle = ERR_PTR(-EINVAL);
+		goto vf_id_err;
+	}
+
+	ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
+	for (i = 0; i < qnum_per_vf; i++) {
+		ae_handle->qs[i] = &ring_pair_cb->q;
+		ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
+		ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
+
+		ring_pair_cb->used_by_vf = 1;
+		if (port_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+			ring_pair_cb->port_id_in_dsa = port_idx;
+		else
+			ring_pair_cb->port_id_in_dsa = 0;
+
+		ring_pair_cb++;
+	}
+
+	vf_cb->dsaf_dev = dsaf_dev;
+	vf_cb->port_index = port_idx;
+	vf_cb->mac_cb = &dsaf_dev->mac_cb[port_idx];
+
+	ae_handle->phy_if = vf_cb->mac_cb->phy_if;
+	ae_handle->phy_node = vf_cb->mac_cb->phy_node;
+	ae_handle->if_support = vf_cb->mac_cb->if_support;
+	ae_handle->port_type = vf_cb->mac_cb->mac_type;
+
+	return ae_handle;
+vf_id_err:
+	kfree(vf_cb);
+handle_err:
+	return ae_handle;
+}
+
+static void hns_ae_put_handle(struct hnae_handle *handle)
+{
+	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+	int i;
+
+	vf_cb->mac_cb = NULL;
+
+	for (i = 0; i < handle->q_num; i++)
+		hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
+
+	/* the handle and its queue array live inside vf_cb, so free it last */
+	kfree(vf_cb);
+}
+
+static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
+{
+	int q_num = handle->q_num;
+	int i;
+
+	for (i = 0; i < q_num; i++)
+		hns_rcb_ring_enable_hw(handle->qs[i], val);
+}
+
+static void hns_ae_init_queue(struct hnae_queue *q)
+{
+	struct ring_pair_cb *ring =
+		container_of(q, struct ring_pair_cb, q);
+
+	hns_rcb_init_hw(ring);
+}
+
+static void hns_ae_fini_queue(struct hnae_queue *q)
+{
+	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(q->handle);
+
+	if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+		hns_rcb_reset_ring_hw(q);
+}
+
+static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
+{
+	int ret;
+	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+	if (!p || !is_valid_ether_addr((const u8 *)p)) {
+		dev_err(handle->owner_dev, "is not valid ether addr !\n");
+		return -EADDRNOTAVAIL;
+	}
+
+	ret = hns_mac_change_vf_addr(mac_cb, handle->vf_id, p);
+	if (ret != 0) {
+		dev_err(handle->owner_dev,
+			"set_mac_address fail, ret=%d!\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
+{
+	int ret;
+	char *mac_addr = (char *)addr;
+	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+	assert(mac_cb);
+
+	if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+		return 0;
+
+	ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, ENABLE);
+	if (ret) {
+		dev_err(handle->owner_dev,
+			"mac add mul_mac:%pM port%d  fail, ret = %#x!\n",
+			mac_addr, mac_cb->mac_id, ret);
+		return ret;
+	}
+
+	ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM,
+				mac_addr, ENABLE);
+	if (ret)
+		dev_err(handle->owner_dev,
+			"mac add mul_mac:%pM port%d  fail, ret = %#x!\n",
+			mac_addr, DSAF_BASE_INNER_PORT_NUM, ret);
+
+	return ret;
+}
+
+static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
+{
+	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+	return hns_mac_set_mtu(mac_cb, new_mtu);
+}
+
+static int hns_ae_start(struct hnae_handle *handle)
+{
+	int ret;
+	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+	ret = hns_mac_vm_config_bc_en(mac_cb, 0, ENABLE);
+	if (ret)
+		return ret;
+
+	hns_ae_ring_enable_all(handle, 1);
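+	/* let the rings settle before starting the mac */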
+	msleep(100);
+
+	hns_mac_start(mac_cb);
+
+	return 0;
+}
+
+void hns_ae_stop(struct hnae_handle *handle)
+{
+	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+	/* just clean tx fbd, no need to clean rx fbd */
+	hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_TX);
+
+	msleep(20);
+
+	hns_mac_stop(mac_cb);
+
+	usleep_range(10000, 20000);
+
+	hns_ae_ring_enable_all(handle, 0);
+
+	(void)hns_mac_vm_config_bc_en(mac_cb, 0, DISABLE);
+}
+
+static void hns_ae_reset(struct hnae_handle *handle)
+{
+	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+	if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
+		u8 ppe_common_index =
+			vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
+
+		hns_mac_reset(vf_cb->mac_cb);
+		hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index);
+	}
+}
+
+void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
+{
+	u32 flag;
+
+	if (is_tx_ring(ring))
+		flag = RCB_INT_FLAG_TX;
+	else
+		flag = RCB_INT_FLAG_RX;
+
+	hns_rcb_int_clr_hw(ring->q, flag);
+	hns_rcb_int_ctrl_hw(ring->q, flag, mask);
+}
+
+static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val)
+{
+	hns_rcb_start(queue, val);
+}
+
+static int hns_ae_get_link_status(struct hnae_handle *handle)
+{
+	u32 link_status;
+	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+	hns_mac_get_link_status(mac_cb, &link_status);
+
+	return !!link_status;
+}
+
+static int hns_ae_get_mac_info(struct hnae_handle *handle,
+			       u8 *auto_neg, u16 *speed, u8 *duplex)
+{
+	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+	return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
+}
+
+static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
+			       int duplex)
+{
+	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+	hns_mac_adjust_link(mac_cb, speed, duplex);
+}
+
+static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
+					u32 *uplimit)
+{
+	*uplimit = HNS_RCB_RING_MAX_PENDING_BD;
+}
+
+static void hns_ae_get_pauseparam(struct hnae_handle *handle,
+				  u32 *auto_neg, u32 *rx_en, u32 *tx_en)
+{
+	assert(handle);
+
+	hns_mac_get_autoneg(hns_get_mac_cb(handle), auto_neg);
+
+	hns_mac_get_pauseparam(hns_get_mac_cb(handle), rx_en, tx_en);
+}
+
+static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
+{
+	assert(handle);
+
+	return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable);
+}
+
+static int hns_ae_get_autoneg(struct hnae_handle *handle)
+{
+	u32     auto_neg;
+
+	assert(handle);
+
+	hns_mac_get_autoneg(hns_get_mac_cb(handle), &auto_neg);
+
+	return auto_neg;
+}
+
+static int hns_ae_set_pauseparam(struct hnae_handle *handle,
+				 u32 autoneg, u32 rx_en, u32 tx_en)
+{
+	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+	int ret;
+
+	ret = hns_mac_set_autoneg(mac_cb, autoneg);
+	if (ret)
+		return ret;
+
+	return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
+}
+
+static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
+				      u32 *tx_usecs, u32 *rx_usecs)
+{
+	int port;
+
+	port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+	*tx_usecs = hns_rcb_get_coalesce_usecs(
+		hns_ae_get_dsaf_dev(handle->dev),
+		hns_dsaf_get_comm_idx_by_port(port));
+	*rx_usecs = hns_rcb_get_coalesce_usecs(
+		hns_ae_get_dsaf_dev(handle->dev),
+		hns_dsaf_get_comm_idx_by_port(port));
+}
+
+static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
+					       u32 *tx_frames, u32 *rx_frames)
+{
+	int port;
+
+	assert(handle);
+
+	port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+	*tx_frames = hns_rcb_get_coalesced_frames(
+		hns_ae_get_dsaf_dev(handle->dev), port);
+	*rx_frames = hns_rcb_get_coalesced_frames(
+		hns_ae_get_dsaf_dev(handle->dev), port);
+}
+
+static void hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
+				      u32 timeout)
+{
+	int port;
+
+	assert(handle);
+
+	port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+	hns_rcb_set_coalesce_usecs(hns_ae_get_dsaf_dev(handle->dev),
+				   port, timeout);
+}
+
+static int  hns_ae_set_coalesce_frames(struct hnae_handle *handle,
+				       u32 coalesce_frames)
+{
+	int port;
+	int ret;
+
+	assert(handle);
+
+	port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+	ret = hns_rcb_set_coalesced_frames(hns_ae_get_dsaf_dev(handle->dev),
+					   port, coalesce_frames);
+	return ret;
+}
+
+void hns_ae_update_stats(struct hnae_handle *handle,
+			 struct net_device_stats *net_stats)
+{
+	int port;
+	int idx;
+	struct dsaf_device *dsaf_dev;
+	struct hns_mac_cb *mac_cb;
+	struct hns_ppe_cb *ppe_cb;
+	struct hnae_queue *queue;
+	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+	u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0;
+	u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0;
+	u64 rx_missed_errors = 0;
+
+	dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+	if (!dsaf_dev)
+		return;
+	port = vf_cb->port_index;
+	ppe_cb = hns_get_ppe_cb(handle);
+	mac_cb = hns_get_mac_cb(handle);
+
+	for (idx = 0; idx < handle->q_num; idx++) {
+		queue = handle->qs[idx];
+		hns_rcb_update_stats(queue);
+
+		tx_bytes += queue->tx_ring.stats.tx_bytes;
+		tx_packets += queue->tx_ring.stats.tx_pkts;
+		rx_bytes += queue->rx_ring.stats.rx_bytes;
+		rx_packets += queue->rx_ring.stats.rx_pkts;
+
+		rx_errors += queue->rx_ring.stats.err_pkt_len
+				+ queue->rx_ring.stats.l2_err
+				+ queue->rx_ring.stats.l3l4_csum_err;
+	}
+
+	hns_ppe_update_stats(ppe_cb);
+	rx_missed_errors = ppe_cb->hw_stats.rx_drop_no_buf;
+	tx_errors += ppe_cb->hw_stats.tx_err_checksum
+		+ ppe_cb->hw_stats.tx_err_fifo_empty;
+
+	if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
+		hns_dsaf_update_stats(dsaf_dev, port);
+		/* for port upline direction, i.e., rx. */
+		rx_missed_errors += dsaf_dev->hw_stats[port].bp_drop;
+		rx_missed_errors += dsaf_dev->hw_stats[port].pad_drop;
+		rx_missed_errors += dsaf_dev->hw_stats[port].crc_false;
+
+		/* for port downline direction, i.e., tx. */
+		port = port + DSAF_PPE_INODE_BASE;
+		hns_dsaf_update_stats(dsaf_dev, port);
+		tx_dropped += dsaf_dev->hw_stats[port].bp_drop;
+		tx_dropped += dsaf_dev->hw_stats[port].pad_drop;
+		tx_dropped += dsaf_dev->hw_stats[port].crc_false;
+		tx_dropped += dsaf_dev->hw_stats[port].rslt_drop;
+		tx_dropped += dsaf_dev->hw_stats[port].vlan_drop;
+		tx_dropped += dsaf_dev->hw_stats[port].stp_drop;
+	}
+
+	hns_mac_update_stats(mac_cb);
+	rx_errors += mac_cb->hw_stats.rx_fifo_overrun_err;
+
+	tx_errors += mac_cb->hw_stats.tx_bad_pkts
+		+ mac_cb->hw_stats.tx_fragment_err
+		+ mac_cb->hw_stats.tx_jabber_err
+		+ mac_cb->hw_stats.tx_underrun_err
+		+ mac_cb->hw_stats.tx_crc_err;
+
+	net_stats->tx_bytes = tx_bytes;
+	net_stats->tx_packets = tx_packets;
+	net_stats->rx_bytes = rx_bytes;
+	net_stats->rx_dropped = 0;
+	net_stats->rx_packets = rx_packets;
+	net_stats->rx_errors = rx_errors;
+	net_stats->tx_errors = tx_errors;
+	net_stats->tx_dropped = tx_dropped;
+	net_stats->rx_missed_errors = rx_missed_errors;
+	net_stats->rx_crc_errors = mac_cb->hw_stats.rx_fcs_err;
+	net_stats->rx_frame_errors = mac_cb->hw_stats.rx_align_err;
+	net_stats->rx_fifo_errors = mac_cb->hw_stats.rx_fifo_overrun_err;
+	net_stats->rx_length_errors = mac_cb->hw_stats.rx_len_err;
+	net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
+}
+
+void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
+{
+	int idx;
+	struct hns_mac_cb *mac_cb;
+	struct hns_ppe_cb *ppe_cb;
+	u64 *p = data;
+	struct  hnae_vf_cb *vf_cb;
+
+	if (!handle || !data) {
+		pr_err("hns_ae_get_stats NULL handle or data pointer!\n");
+		return;
+	}
+
+	vf_cb = hns_ae_get_vf_cb(handle);
+	mac_cb = hns_get_mac_cb(handle);
+	ppe_cb = hns_get_ppe_cb(handle);
+
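+	/* per-ring stats first, then ppe, mac and (service only) dsaf stats */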
+	for (idx = 0; idx < handle->q_num; idx++) {
+		hns_rcb_get_stats(handle->qs[idx], p);
+		p += hns_rcb_get_ring_sset_count((int)ETH_SS_STATS);
+	}
+
+	hns_ppe_get_stats(ppe_cb, p);
+	p += hns_ppe_get_sset_count((int)ETH_SS_STATS);
+
+	hns_mac_get_stats(mac_cb, p);
+	p += hns_mac_get_sset_count(mac_cb, (int)ETH_SS_STATS);
+
+	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+		hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
+}
+
+void hns_ae_get_strings(struct hnae_handle *handle,
+			u32 stringset, u8 *data)
+{
+	int port;
+	int idx;
+	struct hns_mac_cb *mac_cb;
+	struct hns_ppe_cb *ppe_cb;
+	u8 *p = data;
+	struct	hnae_vf_cb *vf_cb;
+
+	assert(handle);
+
+	vf_cb = hns_ae_get_vf_cb(handle);
+	port = vf_cb->port_index;
+	mac_cb = hns_get_mac_cb(handle);
+	ppe_cb = hns_get_ppe_cb(handle);
+
+	for (idx = 0; idx < handle->q_num; idx++) {
+		hns_rcb_get_strings(stringset, p, idx);
+		p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
+	}
+
+	hns_ppe_get_strings(ppe_cb, stringset, p);
+	p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);
+
+	hns_mac_get_strings(mac_cb, stringset, p);
+	p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
+
+	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+		hns_dsaf_get_strings(stringset, p, port);
+}
+
+int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
+{
+	u32 sset_count = 0;
+	struct hns_mac_cb *mac_cb;
+
+	assert(handle);
+
+	mac_cb = hns_get_mac_cb(handle);
+
+	sset_count += hns_rcb_get_ring_sset_count(stringset) * handle->q_num;
+	sset_count += hns_ppe_get_sset_count(stringset);
+	sset_count += hns_mac_get_sset_count(mac_cb, stringset);
+
+	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+		sset_count += hns_dsaf_get_sset_count(stringset);
+
+	return sset_count;
+}
+
+static int hns_ae_config_loopback(struct hnae_handle *handle,
+				  enum hnae_loop loop, int en)
+{
+	int ret;
+	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+	switch (loop) {
+	case MAC_INTERNALLOOP_SERDES:
+		ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
+		break;
+	case MAC_INTERNALLOOP_MAC:
+		ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+void hns_ae_update_led_status(struct hnae_handle *handle)
+{
+	struct hns_mac_cb *mac_cb;
+
+	assert(handle);
+	mac_cb = hns_get_mac_cb(handle);
+	if (!mac_cb->cpld_vaddr)
+		return;
+	hns_set_led_opt(mac_cb);
+}
+
+int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
+			   enum hnae_led_state status)
+{
+	struct hns_mac_cb *mac_cb;
+
+	assert(handle);
+
+	mac_cb = hns_get_mac_cb(handle);
+
+	return hns_cpld_led_set_id(mac_cb, status);
+}
+
+void hns_ae_get_regs(struct hnae_handle *handle, void *data)
+{
+	u32 *p = data;
+	u32 rcb_com_idx;
+	int i;
+	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+	struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
+
+	hns_ppe_get_regs(ppe_cb, p);
+	p += hns_ppe_get_regs_count();
+
+	rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index);
+	hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p);
+	p += hns_rcb_get_common_regs_count();
+
+	for (i = 0; i < handle->q_num; i++) {
+		hns_rcb_get_ring_regs(handle->qs[i], p);
+		p += hns_rcb_get_ring_regs_count();
+	}
+
+	hns_mac_get_regs(vf_cb->mac_cb, p);
+	p += hns_mac_get_regs_count(vf_cb->mac_cb);
+
+	if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+		hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
+}
+
+int hns_ae_get_regs_len(struct hnae_handle *handle)
+{
+	u32 total_num;
+	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+	total_num = hns_ppe_get_regs_count();
+	total_num += hns_rcb_get_common_regs_count();
+	total_num += hns_rcb_get_ring_regs_count() * handle->q_num;
+	total_num += hns_mac_get_regs_count(vf_cb->mac_cb);
+
+	if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+		total_num += hns_dsaf_get_regs_count();
+
+	return total_num;
+}
+
+static struct hnae_ae_ops hns_dsaf_ops = {
+	.get_handle = hns_ae_get_handle,
+	.put_handle = hns_ae_put_handle,
+	.init_queue = hns_ae_init_queue,
+	.fini_queue = hns_ae_fini_queue,
+	.start = hns_ae_start,
+	.stop = hns_ae_stop,
+	.reset = hns_ae_reset,
+	.toggle_ring_irq = hns_ae_toggle_ring_irq,
+	.toggle_queue_status = hns_ae_toggle_queue_status,
+	.get_status = hns_ae_get_link_status,
+	.get_info = hns_ae_get_mac_info,
+	.adjust_link = hns_ae_adjust_link,
+	.set_loopback = hns_ae_config_loopback,
+	.get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
+	.get_pauseparam = hns_ae_get_pauseparam,
+	.set_autoneg = hns_ae_set_autoneg,
+	.get_autoneg = hns_ae_get_autoneg,
+	.set_pauseparam = hns_ae_set_pauseparam,
+	.get_coalesce_usecs = hns_ae_get_coalesce_usecs,
+	.get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
+	.set_coalesce_usecs = hns_ae_set_coalesce_usecs,
+	.set_coalesce_frames = hns_ae_set_coalesce_frames,
+	.set_mac_addr = hns_ae_set_mac_address,
+	.set_mc_addr = hns_ae_set_multicast_one,
+	.set_mtu = hns_ae_set_mtu,
+	.update_stats = hns_ae_update_stats,
+	.get_stats = hns_ae_get_stats,
+	.get_strings = hns_ae_get_strings,
+	.get_sset_count = hns_ae_get_sset_count,
+	.update_led_status = hns_ae_update_led_status,
+	.set_led_id = hns_ae_cpld_set_led_id,
+	.get_regs = hns_ae_get_regs,
+	.get_regs_len = hns_ae_get_regs_len
+};
+
+int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
+{
+	struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
+
+	ae_dev->ops = &hns_dsaf_ops;
+	ae_dev->dev = dsaf_dev->dev;
+
+	return hnae_ae_register(ae_dev, THIS_MODULE);
+}
+
+void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev)
+{
+	hnae_ae_unregister(&dsaf_dev->ae_dev);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
new file mode 100644
index 0000000..b8517b0
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -0,0 +1,704 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/of_mdio.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_gmac.h"
+
+static const struct mac_stats_string g_gmac_stats_string[] = {
+	{"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
+	{"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
+	{"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
+	{"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+	{"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
+	{"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
+	{"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
+	{"gmac_rx_pkts_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
+	{"gmac_rx_pkts_256to511", MAC_STATS_FIELD_OFF(rx_256to511)},
+	{"gmac_rx_pkts_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)},
+	{"gmac_rx_pkts_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)},
+	{"gmac_rx_pkts_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)},
+	{"gmac_rx_fcs_errors", MAC_STATS_FIELD_OFF(rx_fcs_err)},
+	{"gmac_rx_tagged", MAC_STATS_FIELD_OFF(rx_vlan_pkts)},
+	{"gmac_rx_data_err", MAC_STATS_FIELD_OFF(rx_data_err)},
+	{"gmac_rx_align_errors", MAC_STATS_FIELD_OFF(rx_align_err)},
+	{"gmac_rx_long_errors", MAC_STATS_FIELD_OFF(rx_oversize)},
+	{"gmac_rx_jabber_errors", MAC_STATS_FIELD_OFF(rx_jabber_err)},
+	{"gmac_rx_pause_maccontrol", MAC_STATS_FIELD_OFF(rx_pfc_tc0)},
+	{"gmac_rx_unknown_maccontrol", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)},
+	{"gmac_rx_very_long_err", MAC_STATS_FIELD_OFF(rx_long_err)},
+	{"gmac_rx_runt_err", MAC_STATS_FIELD_OFF(rx_minto64)},
+	{"gmac_rx_short_err", MAC_STATS_FIELD_OFF(rx_under_min)},
+	{"gmac_rx_filt_pkt", MAC_STATS_FIELD_OFF(rx_filter_bytes)},
+	{"gmac_rx_octets_total_filt", MAC_STATS_FIELD_OFF(rx_filter_pkts)},
+	{"gmac_rx_overrun_cnt", MAC_STATS_FIELD_OFF(rx_fifo_overrun_err)},
+	{"gmac_rx_length_err", MAC_STATS_FIELD_OFF(rx_len_err)},
+	{"gmac_rx_fail_comma", MAC_STATS_FIELD_OFF(rx_comma_err)},
+
+	{"gmac_tx_octets_ok", MAC_STATS_FIELD_OFF(tx_good_bytes)},
+	{"gmac_tx_octets_bad", MAC_STATS_FIELD_OFF(tx_bad_bytes)},
+	{"gmac_tx_uc_pkts", MAC_STATS_FIELD_OFF(tx_uc_pkts)},
+	{"gmac_tx_mc_pkts", MAC_STATS_FIELD_OFF(tx_mc_pkts)},
+	{"gmac_tx_bc_pkts", MAC_STATS_FIELD_OFF(tx_bc_pkts)},
+	{"gmac_tx_pkts_64octets", MAC_STATS_FIELD_OFF(tx_64bytes)},
+	{"gmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)},
+	{"gmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)},
+	{"gmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)},
+	{"gmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)},
+	{"gmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)},
+	{"gmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)},
+	{"gmac_tx_excessive_length_drop", MAC_STATS_FIELD_OFF(tx_jabber_err)},
+	{"gmac_tx_underrun", MAC_STATS_FIELD_OFF(tx_underrun_err)},
+	{"gmac_tx_tagged", MAC_STATS_FIELD_OFF(tx_vlan)},
+	{"gmac_tx_crc_error", MAC_STATS_FIELD_OFF(tx_crc_err)},
+	{"gmac_tx_pause_frames", MAC_STATS_FIELD_OFF(tx_pfc_tc0)}
+};
+
+static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	/*enable GE rX/tX */
+	if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
+
+	if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
+}
+
+static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	/*disable GE rX/tX */
+	if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
+
+	if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
+}
+
+/**
+ * hns_gmac_get_en - get port enable
+ * @mac_drv: mac device
+ * @rx: rx enable
+ * @tx: tx enable
+ */
+static void hns_gmac_get_en(void *mac_drv, u32 *rx, u32 *tx)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	u32 porten;
+
+	porten = dsaf_read_dev(drv, GMAC_PORT_EN_REG);
+	*tx = dsaf_get_bit(porten, GMAC_PORT_TX_EN_B);
+	*rx = dsaf_get_bit(porten, GMAC_PORT_RX_EN_B);
+}
+
+static void hns_gmac_free(void *mac_drv)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	struct dsaf_device *dsaf_dev
+		= (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+	u32 mac_id = drv->mac_id;
+
+	hns_dsaf_ge_srst_by_port(dsaf_dev, mac_id, 0);
+}
+
+static void hns_gmac_set_tx_auto_pause_frames(void *mac_drv, u16 newval)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	dsaf_set_dev_field(drv, GMAC_FC_TX_TIMER_REG, GMAC_FC_TX_TIMER_M,
+			   GMAC_FC_TX_TIMER_S, newval);
+}
+
+static void hns_gmac_get_tx_auto_pause_frames(void *mac_drv, u16 *newval)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	*newval = dsaf_get_dev_field(drv, GMAC_FC_TX_TIMER_REG,
+				     GMAC_FC_TX_TIMER_M, GMAC_FC_TX_TIMER_S);
+}
+
+static void hns_gmac_set_rx_auto_pause_frames(void *mac_drv, u32 newval)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	dsaf_set_dev_bit(drv, GMAC_PAUSE_EN_REG,
+			 GMAC_PAUSE_EN_RX_FDFC_B, !!newval);
+}
+
+static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	dsaf_set_dev_field(drv, GMAC_MAX_FRM_SIZE_REG, GMAC_MAX_FRM_SIZE_M,
+			   GMAC_MAX_FRM_SIZE_S, newval);
+
+	dsaf_set_dev_field(drv, GAMC_RX_MAX_FRAME, GMAC_MAX_FRM_SIZE_M,
+			   GMAC_MAX_FRM_SIZE_S, newval);
+}
+
+static void hns_gmac_config_an_mode(void *mac_drv, u8 newval)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	dsaf_set_dev_bit(drv, GMAC_TRANSMIT_CONTROL_REG,
+			 GMAC_TX_AN_EN_B, !!newval);
+}
+
+static void hns_gmac_tx_loop_pkt_dis(void *mac_drv)
+{
+	u32 tx_loop_pkt_pri;
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	tx_loop_pkt_pri = dsaf_read_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG);
+	dsaf_set_bit(tx_loop_pkt_pri, GMAC_TX_LOOP_PKT_EN_B, 1);
+	dsaf_set_bit(tx_loop_pkt_pri, GMAC_TX_LOOP_PKT_HIG_PRI_B, 0);
+	dsaf_write_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG, tx_loop_pkt_pri);
+}
+
+static void hns_gmac_set_duplex_type(void *mac_drv, u8 newval)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
+			 GMAC_DUPLEX_TYPE_B, !!newval);
+}
+
+static void hns_gmac_get_duplex_type(void *mac_drv,
+				     enum hns_gmac_duplex_mdoe *duplex_mode)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	*duplex_mode = (enum hns_gmac_duplex_mdoe)dsaf_get_dev_bit(
+		drv, GMAC_DUPLEX_TYPE_REG, GMAC_DUPLEX_TYPE_B);
+}
+
+static void hns_gmac_get_port_mode(void *mac_drv, enum hns_port_mode *port_mode)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	*port_mode = (enum hns_port_mode)dsaf_get_dev_field(
+		drv, GMAC_PORT_MODE_REG, GMAC_PORT_MODE_M, GMAC_PORT_MODE_S);
+}
+
+static void hns_gmac_port_mode_get(void *mac_drv,
+				   struct hns_gmac_port_mode_cfg *port_mode)
+{
+	u32 tx_ctrl;
+	u32 recv_ctrl;
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	port_mode->port_mode = (enum hns_port_mode)dsaf_get_dev_field(
+		drv, GMAC_PORT_MODE_REG, GMAC_PORT_MODE_M, GMAC_PORT_MODE_S);
+
+	tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+	recv_ctrl = dsaf_read_dev(drv, GMAC_RECV_CONTROL_REG);
+
+	port_mode->max_frm_size =
+		dsaf_get_dev_field(drv, GMAC_MAX_FRM_SIZE_REG,
+				   GMAC_MAX_FRM_SIZE_M, GMAC_MAX_FRM_SIZE_S);
+	port_mode->short_runts_thr =
+		dsaf_get_dev_field(drv, GMAC_SHORT_RUNTS_THR_REG,
+				   GMAC_SHORT_RUNTS_THR_M,
+				   GMAC_SHORT_RUNTS_THR_S);
+
+	port_mode->pad_enable = dsaf_get_bit(tx_ctrl, GMAC_TX_PAD_EN_B);
+	port_mode->crc_add = dsaf_get_bit(tx_ctrl, GMAC_TX_CRC_ADD_B);
+	port_mode->an_enable = dsaf_get_bit(tx_ctrl, GMAC_TX_AN_EN_B);
+
+	port_mode->runt_pkt_en =
+		dsaf_get_bit(recv_ctrl, GMAC_RECV_CTRL_RUNT_PKT_EN_B);
+	port_mode->strip_pad_en =
+		dsaf_get_bit(recv_ctrl, GMAC_RECV_CTRL_STRIP_PAD_EN_B);
+}
+
+static void hns_gmac_pause_frm_cfg(void *mac_drv, u32 rx_pause_en,
+				   u32 tx_pause_en)
+{
+	u32 pause_en;
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	pause_en = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+	dsaf_set_bit(pause_en, GMAC_PAUSE_EN_RX_FDFC_B, !!rx_pause_en);
+	dsaf_set_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B, !!tx_pause_en);
+	dsaf_write_dev(drv, GMAC_PAUSE_EN_REG, pause_en);
+}
+
+static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
+				      u32 *tx_pause_en)
+{
+	u32 pause_en;
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	pause_en = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+
+	*rx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_RX_FDFC_B);
+	*tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
+}
+
+static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
+				u32 full_duplex)
+{
+	u32 tx_ctrl;
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
+			 GMAC_DUPLEX_TYPE_B, !!full_duplex);
+
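+	/* 0x6/0x7/0x8 select 10M/100M/1000M SGMII (enum hns_port_mode) */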
+	switch (speed) {
+	case MAC_SPEED_10:
+		dsaf_set_dev_field(
+			drv, GMAC_PORT_MODE_REG,
+			GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x6);
+		break;
+	case MAC_SPEED_100:
+		dsaf_set_dev_field(
+			drv, GMAC_PORT_MODE_REG,
+			GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x7);
+		break;
+	case MAC_SPEED_1000:
+		dsaf_set_dev_field(
+			drv, GMAC_PORT_MODE_REG,
+			GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x8);
+		break;
+	default:
+		dev_err(drv->dev,
+			"hns_gmac_adjust_link fail, speed%d mac%d\n",
+			speed, drv->mac_id);
+		return -EINVAL;
+	}
+
+	tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+	dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, 1);
+	dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, 1);
+	dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
+
+	dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
+			 GMAC_MODE_CHANGE_EB_B, 1);
+
+	return 0;
+}
+
+static void hns_gmac_init(void *mac_drv)
+{
+	u32 port;
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	struct dsaf_device *dsaf_dev
+		= (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+	port = drv->mac_id;
+
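+	/* toggle the GE soft reset for this port before configuring it */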
+	hns_dsaf_ge_srst_by_port(dsaf_dev, port, 0);
+	mdelay(10);
+	hns_dsaf_ge_srst_by_port(dsaf_dev, port, 1);
+	mdelay(10);
+	hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
+	hns_gmac_tx_loop_pkt_dis(mac_drv);
+}
+
+void hns_gmac_update_stats(void *mac_drv)
+{
+	struct mac_hw_stats *hw_stats = NULL;
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	hw_stats = &drv->mac_cb->hw_stats;
+
+	/* RX */
+	hw_stats->rx_good_bytes
+		+= dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_OK_REG);
+	hw_stats->rx_bad_bytes
+		+= dsaf_read_dev(drv, GMAC_RX_OCTETS_BAD_REG);
+	hw_stats->rx_uc_pkts += dsaf_read_dev(drv, GMAC_RX_UC_PKTS_REG);
+	hw_stats->rx_mc_pkts += dsaf_read_dev(drv, GMAC_RX_MC_PKTS_REG);
+	hw_stats->rx_bc_pkts += dsaf_read_dev(drv, GMAC_RX_BC_PKTS_REG);
+	hw_stats->rx_64bytes
+		+= dsaf_read_dev(drv, GMAC_RX_PKTS_64OCTETS_REG);
+	hw_stats->rx_65to127
+		+= dsaf_read_dev(drv, GMAC_RX_PKTS_65TO127OCTETS_REG);
+	hw_stats->rx_128to255
+		+= dsaf_read_dev(drv, GMAC_RX_PKTS_128TO255OCTETS_REG);
+	hw_stats->rx_256to511
+		+= dsaf_read_dev(drv, GMAC_RX_PKTS_255TO511OCTETS_REG);
+	hw_stats->rx_512to1023
+		+= dsaf_read_dev(drv, GMAC_RX_PKTS_512TO1023OCTETS_REG);
+	hw_stats->rx_1024to1518
+		+= dsaf_read_dev(drv, GMAC_RX_PKTS_1024TO1518OCTETS_REG);
+	hw_stats->rx_1519tomax
+		+= dsaf_read_dev(drv, GMAC_RX_PKTS_1519TOMAXOCTETS_REG);
+	hw_stats->rx_fcs_err += dsaf_read_dev(drv, GMAC_RX_FCS_ERRORS_REG);
+	hw_stats->rx_vlan_pkts += dsaf_read_dev(drv, GMAC_RX_TAGGED_REG);
+	hw_stats->rx_data_err += dsaf_read_dev(drv, GMAC_RX_DATA_ERR_REG);
+	hw_stats->rx_align_err
+		+= dsaf_read_dev(drv, GMAC_RX_ALIGN_ERRORS_REG);
+	hw_stats->rx_oversize
+		+= dsaf_read_dev(drv, GMAC_RX_LONG_ERRORS_REG);
+	hw_stats->rx_jabber_err
+		+= dsaf_read_dev(drv, GMAC_RX_JABBER_ERRORS_REG);
+	hw_stats->rx_pfc_tc0
+		+= dsaf_read_dev(drv, GMAC_RX_PAUSE_MACCTRL_FRAM_REG);
+	hw_stats->rx_unknown_ctrl
+		+= dsaf_read_dev(drv, GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG);
+	hw_stats->rx_long_err
+		+= dsaf_read_dev(drv, GMAC_RX_VERY_LONG_ERR_CNT_REG);
+	hw_stats->rx_minto64
+		+= dsaf_read_dev(drv, GMAC_RX_RUNT_ERR_CNT_REG);
+	hw_stats->rx_under_min
+		+= dsaf_read_dev(drv, GMAC_RX_SHORT_ERR_CNT_REG);
+	hw_stats->rx_filter_pkts
+		+= dsaf_read_dev(drv, GMAC_RX_FILT_PKT_CNT_REG);
+	hw_stats->rx_filter_bytes
+		+= dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_FILT_REG);
+	hw_stats->rx_fifo_overrun_err
+		+= dsaf_read_dev(drv, GMAC_RX_OVERRUN_CNT_REG);
+	hw_stats->rx_len_err
+		+= dsaf_read_dev(drv, GMAC_RX_LENGTHFIELD_ERR_CNT_REG);
+	hw_stats->rx_comma_err
+		+= dsaf_read_dev(drv, GMAC_RX_FAIL_COMMA_CNT_REG);
+
+	/* TX */
+	hw_stats->tx_good_bytes
+		+= dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_OK_REG);
+	hw_stats->tx_bad_bytes
+		+= dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_BAD_REG);
+	hw_stats->tx_uc_pkts += dsaf_read_dev(drv, GMAC_TX_UC_PKTS_REG);
+	hw_stats->tx_mc_pkts += dsaf_read_dev(drv, GMAC_TX_MC_PKTS_REG);
+	hw_stats->tx_bc_pkts += dsaf_read_dev(drv, GMAC_TX_BC_PKTS_REG);
+	hw_stats->tx_64bytes
+		+= dsaf_read_dev(drv, GMAC_TX_PKTS_64OCTETS_REG);
+	hw_stats->tx_65to127
+		+= dsaf_read_dev(drv, GMAC_TX_PKTS_65TO127OCTETS_REG);
+	hw_stats->tx_128to255
+		+= dsaf_read_dev(drv, GMAC_TX_PKTS_128TO255OCTETS_REG);
+	hw_stats->tx_256to511
+		+= dsaf_read_dev(drv, GMAC_TX_PKTS_255TO511OCTETS_REG);
+	hw_stats->tx_512to1023
+		+= dsaf_read_dev(drv, GMAC_TX_PKTS_512TO1023OCTETS_REG);
+	hw_stats->tx_1024to1518
+		+= dsaf_read_dev(drv, GMAC_TX_PKTS_1024TO1518OCTETS_REG);
+	hw_stats->tx_1519tomax
+		+= dsaf_read_dev(drv, GMAC_TX_PKTS_1519TOMAXOCTETS_REG);
+	hw_stats->tx_jabber_err
+		+= dsaf_read_dev(drv, GMAC_TX_EXCESSIVE_LENGTH_DROP_REG);
+	hw_stats->tx_underrun_err
+		+= dsaf_read_dev(drv, GMAC_TX_UNDERRUN_REG);
+	hw_stats->tx_vlan += dsaf_read_dev(drv, GMAC_TX_TAGGED_REG);
+	hw_stats->tx_crc_err += dsaf_read_dev(drv, GMAC_TX_CRC_ERROR_REG);
+	hw_stats->tx_pfc_tc0
+		+= dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
+}
+
+static void hns_gmac_set_mac_addr(void *mac_drv, char *mac_addr)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
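+	/* only non-service (debug) macs program the station address here */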
+	if (drv->mac_id >= DSAF_SERVICE_NW_NUM) {
+		u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+
+		u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+			| (mac_addr[3] << 16) | (mac_addr[2] << 24);
+		dsaf_write_dev(drv, GMAC_STATION_ADDR_LOW_2_REG, low_val);
+		dsaf_write_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG, high_val);
+	}
+}
+
+static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode,
+				    u8 enable)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	switch (loop_mode) {
+	case MAC_INTERNALLOOP_MAC:
+		dsaf_set_dev_bit(drv, GMAC_LOOP_REG, GMAC_LP_REG_CF2MI_LP_EN_B,
+				 !!enable);
+		break;
+	default:
+		dev_err(drv->dev, "loop_mode error\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+	u32 tx_ctrl;
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+	dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval);
+	dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval);
+	dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
+}
+
+static void hns_gmac_get_id(void *mac_drv, u8 *mac_id)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	*mac_id = drv->mac_id;
+}
+
+static void hns_gmac_get_info(void *mac_drv, struct mac_info *mac_info)
+{
+	enum hns_gmac_duplex_mdoe duplex;
+	enum hns_port_mode speed;
+	u32 rx_pause;
+	u32 tx_pause;
+	u32 rx;
+	u32 tx;
+	u16 fc_tx_timer;
+	struct hns_gmac_port_mode_cfg port_mode = { GMAC_10M_MII, 0 };
+
+	hns_gmac_port_mode_get(mac_drv, &port_mode);
+	mac_info->pad_and_crc_en = port_mode.crc_add && port_mode.pad_enable;
+	mac_info->auto_neg = port_mode.an_enable;
+
+	hns_gmac_get_tx_auto_pause_frames(mac_drv, &fc_tx_timer);
+	mac_info->tx_pause_time = fc_tx_timer;
+
+	hns_gmac_get_en(mac_drv, &rx, &tx);
+	mac_info->port_en = rx && tx;
+
+	hns_gmac_get_duplex_type(mac_drv, &duplex);
+	mac_info->duplex = duplex;
+
+	hns_gmac_get_port_mode(mac_drv, &speed);
+	switch (speed) {
+	case GMAC_10M_SGMII:
+		mac_info->speed = MAC_SPEED_10;
+		break;
+	case GMAC_100M_SGMII:
+		mac_info->speed = MAC_SPEED_100;
+		break;
+	case GMAC_1000M_SGMII:
+		mac_info->speed = MAC_SPEED_1000;
+		break;
+	default:
+		mac_info->speed = 0;
+		break;
+	}
+
+	hns_gmac_get_pausefrm_cfg(mac_drv, &rx_pause, &tx_pause);
+	mac_info->rx_pause_en = rx_pause;
+	mac_info->tx_pause_en = tx_pause;
+}
+
+static void hns_gmac_autoneg_stat(void *mac_drv, u32 *enable)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	*enable = dsaf_get_dev_bit(drv, GMAC_TRANSMIT_CONTROL_REG,
+				   GMAC_TX_AN_EN_B);
+}
+
+static void hns_gmac_get_link_status(void *mac_drv, u32 *link_stat)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	*link_stat = dsaf_get_dev_bit(drv, GMAC_AN_NEG_STATE_REG,
+				      GMAC_AN_NEG_STAT_RX_SYNC_OK_B);
+}
+
+static void hns_gmac_get_regs(void *mac_drv, void *data)
+{
+	u32 *regs = data;
+	int i;
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	/* base config registers */
+	regs[0] = dsaf_read_dev(drv, GMAC_DUPLEX_TYPE_REG);
+	regs[1] = dsaf_read_dev(drv, GMAC_FD_FC_TYPE_REG);
+	regs[2] = dsaf_read_dev(drv, GMAC_FC_TX_TIMER_REG);
+	regs[3] = dsaf_read_dev(drv, GMAC_FD_FC_ADDR_LOW_REG);
+	regs[4] = dsaf_read_dev(drv, GMAC_FD_FC_ADDR_HIGH_REG);
+	regs[5] = dsaf_read_dev(drv, GMAC_IPG_TX_TIMER_REG);
+	regs[6] = dsaf_read_dev(drv, GMAC_PAUSE_THR_REG);
+	regs[7] = dsaf_read_dev(drv, GMAC_MAX_FRM_SIZE_REG);
+	regs[8] = dsaf_read_dev(drv, GMAC_PORT_MODE_REG);
+	regs[9] = dsaf_read_dev(drv, GMAC_PORT_EN_REG);
+	regs[10] = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+	regs[11] = dsaf_read_dev(drv, GMAC_SHORT_RUNTS_THR_REG);
+	regs[12] = dsaf_read_dev(drv, GMAC_AN_NEG_STATE_REG);
+	regs[13] = dsaf_read_dev(drv, GMAC_TX_LOCAL_PAGE_REG);
+	regs[14] = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+	regs[15] = dsaf_read_dev(drv, GMAC_REC_FILT_CONTROL_REG);
+	regs[16] = dsaf_read_dev(drv, GMAC_PTP_CONFIG_REG);
+
+	/* rx static registers */
+	regs[17] = dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_OK_REG);
+	regs[18] = dsaf_read_dev(drv, GMAC_RX_OCTETS_BAD_REG);
+	regs[19] = dsaf_read_dev(drv, GMAC_RX_UC_PKTS_REG);
+	regs[20] = dsaf_read_dev(drv, GMAC_RX_MC_PKTS_REG);
+	regs[21] = dsaf_read_dev(drv, GMAC_RX_BC_PKTS_REG);
+	regs[22] = dsaf_read_dev(drv, GMAC_RX_PKTS_64OCTETS_REG);
+	regs[23] = dsaf_read_dev(drv, GMAC_RX_PKTS_65TO127OCTETS_REG);
+	regs[24] = dsaf_read_dev(drv, GMAC_RX_PKTS_128TO255OCTETS_REG);
+	regs[25] = dsaf_read_dev(drv, GMAC_RX_PKTS_255TO511OCTETS_REG);
+	regs[26] = dsaf_read_dev(drv, GMAC_RX_PKTS_512TO1023OCTETS_REG);
+	regs[27] = dsaf_read_dev(drv, GMAC_RX_PKTS_1024TO1518OCTETS_REG);
+	regs[28] = dsaf_read_dev(drv, GMAC_RX_PKTS_1519TOMAXOCTETS_REG);
+	regs[29] = dsaf_read_dev(drv, GMAC_RX_FCS_ERRORS_REG);
+	regs[30] = dsaf_read_dev(drv, GMAC_RX_TAGGED_REG);
+	regs[31] = dsaf_read_dev(drv, GMAC_RX_DATA_ERR_REG);
+	regs[32] = dsaf_read_dev(drv, GMAC_RX_ALIGN_ERRORS_REG);
+	regs[33] = dsaf_read_dev(drv, GMAC_RX_LONG_ERRORS_REG);
+	regs[34] = dsaf_read_dev(drv, GMAC_RX_JABBER_ERRORS_REG);
+	regs[35] = dsaf_read_dev(drv, GMAC_RX_PAUSE_MACCTRL_FRAM_REG);
+	regs[36] = dsaf_read_dev(drv, GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG);
+	regs[37] = dsaf_read_dev(drv, GMAC_RX_VERY_LONG_ERR_CNT_REG);
+	regs[38] = dsaf_read_dev(drv, GMAC_RX_RUNT_ERR_CNT_REG);
+	regs[39] = dsaf_read_dev(drv, GMAC_RX_SHORT_ERR_CNT_REG);
+	regs[40] = dsaf_read_dev(drv, GMAC_RX_FILT_PKT_CNT_REG);
+	regs[41] = dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_FILT_REG);
+
+	/* tx static registers */
+	regs[42] = dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_OK_REG);
+	regs[43] = dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_BAD_REG);
+	regs[44] = dsaf_read_dev(drv, GMAC_TX_UC_PKTS_REG);
+	regs[45] = dsaf_read_dev(drv, GMAC_TX_MC_PKTS_REG);
+	regs[46] = dsaf_read_dev(drv, GMAC_TX_BC_PKTS_REG);
+	regs[47] = dsaf_read_dev(drv, GMAC_TX_PKTS_64OCTETS_REG);
+	regs[48] = dsaf_read_dev(drv, GMAC_TX_PKTS_65TO127OCTETS_REG);
+	regs[49] = dsaf_read_dev(drv, GMAC_TX_PKTS_128TO255OCTETS_REG);
+	regs[50] = dsaf_read_dev(drv, GMAC_TX_PKTS_255TO511OCTETS_REG);
+	regs[51] = dsaf_read_dev(drv, GMAC_TX_PKTS_512TO1023OCTETS_REG);
+	regs[52] = dsaf_read_dev(drv, GMAC_TX_PKTS_1024TO1518OCTETS_REG);
+	regs[53] = dsaf_read_dev(drv, GMAC_TX_PKTS_1519TOMAXOCTETS_REG);
+	regs[54] = dsaf_read_dev(drv, GMAC_TX_EXCESSIVE_LENGTH_DROP_REG);
+	regs[55] = dsaf_read_dev(drv, GMAC_TX_UNDERRUN_REG);
+	regs[56] = dsaf_read_dev(drv, GMAC_TX_TAGGED_REG);
+	regs[57] = dsaf_read_dev(drv, GMAC_TX_CRC_ERROR_REG);
+	regs[58] = dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
+
+	regs[59] = dsaf_read_dev(drv, GAMC_RX_MAX_FRAME);
+	regs[60] = dsaf_read_dev(drv, GMAC_LINE_LOOP_BACK_REG);
+	regs[61] = dsaf_read_dev(drv, GMAC_CF_CRC_STRIP_REG);
+	regs[62] = dsaf_read_dev(drv, GMAC_MODE_CHANGE_EN_REG);
+	regs[63] = dsaf_read_dev(drv, GMAC_SIXTEEN_BIT_CNTR_REG);
+	regs[64] = dsaf_read_dev(drv, GMAC_LD_LINK_COUNTER_REG);
+	regs[65] = dsaf_read_dev(drv, GMAC_LOOP_REG);
+	regs[66] = dsaf_read_dev(drv, GMAC_RECV_CONTROL_REG);
+	regs[67] = dsaf_read_dev(drv, GMAC_VLAN_CODE_REG);
+	regs[68] = dsaf_read_dev(drv, GMAC_RX_OVERRUN_CNT_REG);
+	regs[69] = dsaf_read_dev(drv, GMAC_RX_LENGTHFIELD_ERR_CNT_REG);
+	regs[70] = dsaf_read_dev(drv, GMAC_RX_FAIL_COMMA_CNT_REG);
+
+	regs[71] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_0_REG);
+	regs[72] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_0_REG);
+	regs[73] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_1_REG);
+	regs[74] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_1_REG);
+	regs[75] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_2_REG);
+	regs[76] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG);
+	regs[77] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_3_REG);
+	regs[78] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_3_REG);
+	regs[79] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_4_REG);
+	regs[80] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_4_REG);
+	regs[81] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_5_REG);
+	regs[82] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_5_REG);
+	regs[83] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_MSK_0_REG);
+	regs[84] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_MSK_0_REG);
+	regs[85] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_MSK_1_REG);
+	regs[86] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_MSK_1_REG);
+	regs[87] = dsaf_read_dev(drv, GMAC_MAC_SKIP_LEN_REG);
+	regs[88] = dsaf_read_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG);
+
+	/* mark end of mac regs */
+	for (i = 89; i < 96; i++)
+		regs[i] = 0xaaaaaaaa;
+}
+
+static void hns_gmac_get_stats(void *mac_drv, u64 *data)
+{
+	u32 i;
+	u64 *buf = data;
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	struct mac_hw_stats *hw_stats = NULL;
+
+	hw_stats = &drv->mac_cb->hw_stats;
+
+	for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
+		buf[i] = DSAF_STATS_READ(hw_stats,
+			g_gmac_stats_string[i].offset);
+	}
+}
+
+static void hns_gmac_get_strings(u32 stringset, u8 *data)
+{
+	char *buff = (char *)data;
+	u32 i;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
+		snprintf(buff, ETH_GSTRING_LEN, "%s", g_gmac_stats_string[i].desc);
+		buff = buff + ETH_GSTRING_LEN;
+	}
+}
+
+static int hns_gmac_get_sset_count(int stringset)
+{
+	if (stringset == ETH_SS_STATS)
+		return ARRAY_SIZE(g_gmac_stats_string);
+
+	return 0;
+}
+
+static int hns_gmac_get_regs_count(void)
+{
+	return ETH_GMAC_DUMP_NUM;
+}
+
+void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
+{
+	struct mac_driver *mac_drv;
+
+	mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL);
+	if (!mac_drv)
+		return NULL;
+
+	mac_drv->mac_init = hns_gmac_init;
+	mac_drv->mac_enable = hns_gmac_enable;
+	mac_drv->mac_disable = hns_gmac_disable;
+	mac_drv->mac_free = hns_gmac_free;
+	mac_drv->adjust_link = hns_gmac_adjust_link;
+	mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
+	mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
+	mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
+
+	mac_drv->mac_id = mac_param->mac_id;
+	mac_drv->mac_mode = mac_param->mac_mode;
+	mac_drv->io_base = mac_param->vaddr;
+	mac_drv->dev = mac_param->dev;
+	mac_drv->mac_cb = mac_cb;
+
+	mac_drv->set_mac_addr = hns_gmac_set_mac_addr;
+	mac_drv->set_an_mode = hns_gmac_config_an_mode;
+	mac_drv->config_loopback = hns_gmac_config_loopback;
+	mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc;
+	mac_drv->config_half_duplex = hns_gmac_set_duplex_type;
+	mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames;
+	mac_drv->mac_get_id = hns_gmac_get_id;
+	mac_drv->get_info = hns_gmac_get_info;
+	mac_drv->autoneg_stat = hns_gmac_autoneg_stat;
+	mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg;
+	mac_drv->get_link_status = hns_gmac_get_link_status;
+	mac_drv->get_regs = hns_gmac_get_regs;
+	mac_drv->get_regs_count = hns_gmac_get_regs_count;
+	mac_drv->get_ethtool_stats = hns_gmac_get_stats;
+	mac_drv->get_sset_count = hns_gmac_get_sset_count;
+	mac_drv->get_strings = hns_gmac_get_strings;
+	mac_drv->update_stats = hns_gmac_update_stats;
+
+	return (void *)mac_drv;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h
new file mode 100644
index 0000000..44fe301
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_GMAC_H
+#define _HNS_GMAC_H
+
+#include "hns_dsaf_mac.h"
+
+enum hns_port_mode {
+	GMAC_10M_MII = 0,
+	GMAC_100M_MII,
+	GMAC_1000M_GMII,
+	GMAC_10M_RGMII,
+	GMAC_100M_RGMII,
+	GMAC_1000M_RGMII,
+	GMAC_10M_SGMII,
+	GMAC_100M_SGMII,
+	GMAC_1000M_SGMII,
+	GMAC_10000M_SGMII	/* 10GE */
+};
+
+enum hns_gmac_duplex_mdoe {
+	GMAC_HALF_DUPLEX_MODE = 0,
+	GMAC_FULL_DUPLEX_MODE
+};
+
+struct hns_gmac_port_mode_cfg {
+	enum hns_port_mode port_mode;
+	u32 max_frm_size;
+	u32 short_runts_thr;
+	u32 pad_enable;
+	u32 crc_add;
+	u32 an_enable;	/*auto-nego enable  */
+	u32 runt_pkt_en;
+	u32 strip_pad_en;
+};
+
+#define ETH_GMAC_DUMP_NUM		96
+#endif				/* _HNS_GMAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
new file mode 100644
index 0000000..95bf42a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -0,0 +1,900 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/phy_fixed.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "hns_dsaf_misc.h"
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_rcb.h"
+
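+/* magic value stored in mac_en_flg once the mac has been enabled */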
+#define MAC_EN_FLAG_V		0xada0328
+
+static const u16 mac_phy_to_speed[] = {
+	[PHY_INTERFACE_MODE_MII] = MAC_SPEED_100,
+	[PHY_INTERFACE_MODE_GMII] = MAC_SPEED_1000,
+	[PHY_INTERFACE_MODE_SGMII] = MAC_SPEED_1000,
+	[PHY_INTERFACE_MODE_TBI] = MAC_SPEED_1000,
+	[PHY_INTERFACE_MODE_RMII] = MAC_SPEED_100,
+	[PHY_INTERFACE_MODE_RGMII] = MAC_SPEED_1000,
+	[PHY_INTERFACE_MODE_RGMII_ID] = MAC_SPEED_1000,
+	[PHY_INTERFACE_MODE_RGMII_RXID]	= MAC_SPEED_1000,
+	[PHY_INTERFACE_MODE_RGMII_TXID]	= MAC_SPEED_1000,
+	[PHY_INTERFACE_MODE_RTBI] = MAC_SPEED_1000,
+	[PHY_INTERFACE_MODE_XGMII] = MAC_SPEED_10000
+};
+
+static const enum mac_mode g_mac_mode_100[] = {
+	[PHY_INTERFACE_MODE_MII]	= MAC_MODE_MII_100,
+	[PHY_INTERFACE_MODE_RMII]   = MAC_MODE_RMII_100
+};
+
+static const enum mac_mode g_mac_mode_1000[] = {
+	[PHY_INTERFACE_MODE_GMII]   = MAC_MODE_GMII_1000,
+	[PHY_INTERFACE_MODE_SGMII]  = MAC_MODE_SGMII_1000,
+	[PHY_INTERFACE_MODE_TBI]	= MAC_MODE_TBI_1000,
+	[PHY_INTERFACE_MODE_RGMII]  = MAC_MODE_RGMII_1000,
+	[PHY_INTERFACE_MODE_RGMII_ID]   = MAC_MODE_RGMII_1000,
+	[PHY_INTERFACE_MODE_RGMII_RXID] = MAC_MODE_RGMII_1000,
+	[PHY_INTERFACE_MODE_RGMII_TXID] = MAC_MODE_RGMII_1000,
+	[PHY_INTERFACE_MODE_RTBI]   = MAC_MODE_RTBI_1000
+};
+
+static enum mac_mode hns_mac_dev_to_enet_if(const struct hns_mac_cb *mac_cb)
+{
+	switch (mac_cb->max_speed) {
+	case MAC_SPEED_100:
+		return g_mac_mode_100[mac_cb->phy_if];
+	case MAC_SPEED_1000:
+		return g_mac_mode_1000[mac_cb->phy_if];
+	case MAC_SPEED_10000:
+		return MAC_MODE_XGMII_10000;
+	default:
+		return MAC_MODE_MII_100;
+	}
+}
+
+static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
+{
+	switch (mac_cb->max_speed) {
+	case MAC_SPEED_100:
+		return g_mac_mode_100[mac_cb->phy_if];
+	case MAC_SPEED_1000:
+		return g_mac_mode_1000[mac_cb->phy_if];
+	case MAC_SPEED_10000:
+		return MAC_MODE_XGMII_10000;
+	default:
+		return MAC_MODE_MII_100;
+	}
+}
+
+int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+	if (!mac_cb->cpld_vaddr)
+		return -ENODEV;
+
+	*sfp_prsnt = !dsaf_read_b((u8 *)mac_cb->cpld_vaddr
+					+ MAC_SFP_PORT_OFFSET);
+
+	return 0;
+}
+
+void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
+{
+	struct mac_driver *mac_ctrl_drv;
+	int ret, sfp_prsnt;
+
+	mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+	if (mac_ctrl_drv->get_link_status)
+		mac_ctrl_drv->get_link_status(mac_ctrl_drv, link_status);
+	else
+		*link_status = 0;
+
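+	/* an absent SFP module forces the reported link status down */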
+	ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
+	if (!ret)
+		*link_status = *link_status && sfp_prsnt;
+
+	mac_cb->link = *link_status;
+}
+
+int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
+			  u8 *auto_neg, u16 *speed, u8 *duplex)
+{
+	struct mac_driver *mac_ctrl_drv;
+	struct mac_info    info;
+
+	mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+	if (!mac_ctrl_drv->get_info)
+		return -ENODEV;
+
+	mac_ctrl_drv->get_info(mac_ctrl_drv, &info);
+	if (auto_neg)
+		*auto_neg = info.auto_neg;
+	if (speed)
+		*speed = info.speed;
+	if (duplex)
+		*duplex = info.duplex;
+
+	return 0;
+}
+
+void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
+{
+	int ret;
+	struct mac_driver *mac_ctrl_drv;
+
+	mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
+
+	mac_cb->speed = speed;
+	mac_cb->half_duplex = !duplex;
+	mac_ctrl_drv->mac_mode = hns_mac_dev_to_enet_if(mac_cb);
+
+	if (mac_ctrl_drv->adjust_link) {
+		ret = mac_ctrl_drv->adjust_link(mac_ctrl_drv,
+			(enum mac_speed)speed, duplex);
+		if (ret) {
+			dev_err(mac_cb->dev,
+				"adjust_link failed,%s mac%d ret = %#x!\n",
+				mac_cb->dsaf_dev->ae_dev.name,
+				mac_cb->mac_id, ret);
+			return;
+		}
+	}
+}
+
+/**
+ * hns_mac_get_inner_port_num - get mac table inner port number
+ * @mac_cb: mac device
+ * @vmid: vm id
+ * @port_num: port number
+ */
+static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
+				      u8 vmid, u8 *port_num)
+{
+	u8 tmp_port;
+	u32 comm_idx;
+
+	if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
+		if (mac_cb->mac_id != DSAF_MAX_PORT_NUM_PER_CHIP) {
+			dev_err(mac_cb->dev,
+				"input invalid,%s mac%d vmid%d !\n",
+				mac_cb->dsaf_dev->ae_dev.name,
+				mac_cb->mac_id, vmid);
+			return -EINVAL;
+		}
+	} else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
+		if (mac_cb->mac_id <= DSAF_MAX_PORT_NUM_PER_CHIP) {
+			dev_err(mac_cb->dev,
+				"input invalid,%s mac%d vmid%d!\n",
+				mac_cb->dsaf_dev->ae_dev.name,
+				mac_cb->mac_id, vmid);
+			return -EINVAL;
+		}
+	} else {
+		dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
+			mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
+		return -EINVAL;
+	}
+
+	comm_idx = hns_dsaf_get_comm_idx_by_port(mac_cb->mac_id);
+
+	if (vmid >= mac_cb->dsaf_dev->rcb_common[comm_idx]->max_vfn) {
+		dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n",
+			mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
+		return -EINVAL;
+	}
+
+	switch (mac_cb->dsaf_dev->dsaf_mode) {
+	case DSAF_MODE_ENABLE_FIX:
+		tmp_port = 0;
+		break;
+	case DSAF_MODE_DISABLE_FIX:
+		tmp_port = 0;
+		break;
+	case DSAF_MODE_ENABLE_0VM:
+	case DSAF_MODE_ENABLE_8VM:
+	case DSAF_MODE_ENABLE_16VM:
+	case DSAF_MODE_ENABLE_32VM:
+	case DSAF_MODE_ENABLE_128VM:
+	case DSAF_MODE_DISABLE_2PORT_8VM:
+	case DSAF_MODE_DISABLE_2PORT_16VM:
+	case DSAF_MODE_DISABLE_2PORT_64VM:
+	case DSAF_MODE_DISABLE_6PORT_0VM:
+	case DSAF_MODE_DISABLE_6PORT_2VM:
+	case DSAF_MODE_DISABLE_6PORT_4VM:
+	case DSAF_MODE_DISABLE_6PORT_16VM:
+		tmp_port = vmid;
+		break;
+	default:
+		dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
+			mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
+		return -EINVAL;
+	}
+	tmp_port += DSAF_BASE_INNER_PORT_NUM;
+
+	*port_num = tmp_port;
+
+	return 0;
+}
+
+/**
+ * hns_mac_change_vf_addr - change vf mac address
+ * @mac_cb: mac device
+ * @vmid: vm id
+ * @addr: mac address
+ */
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
+			   u32 vmid, char *addr)
+{
+	int ret;
+	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+	struct dsaf_drv_mac_single_dest_entry mac_entry;
+	struct mac_entry_idx *old_entry;
+
+	old_entry = &mac_cb->addr_entry_idx[vmid];
+	if (dsaf_dev) {
+		memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+		mac_entry.in_vlan_id = old_entry->vlan_id;
+		mac_entry.in_port_num = mac_cb->mac_id;
+		ret = hns_mac_get_inner_port_num(mac_cb, (u8)vmid,
+						 &mac_entry.port_num);
+		if (ret)
+			return ret;
+
+		if ((old_entry->valid != 0) &&
+		    (memcmp(old_entry->addr,
+		    addr, sizeof(mac_entry.addr)) != 0)) {
+			ret = hns_dsaf_del_mac_entry(dsaf_dev,
+						     old_entry->vlan_id,
+						     mac_cb->mac_id,
+						     old_entry->addr);
+			if (ret)
+				return ret;
+		}
+
+		ret = hns_dsaf_set_mac_uc_entry(dsaf_dev, &mac_entry);
+		if (ret)
+			return ret;
+	}
+
+	if ((mac_ctrl_drv->set_mac_addr) && (vmid == 0))
+		mac_ctrl_drv->set_mac_addr(mac_cb->priv.mac, addr);
+
+	memcpy(old_entry->addr, addr, sizeof(old_entry->addr));
+	old_entry->valid = 1;
+	return 0;
+}
+
+int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
+		      u32 port_num, char *addr, u8 en)
+{
+	int ret;
+	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+	struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+	if (dsaf_dev && addr) {
+		memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+		mac_entry.in_vlan_id = 0;/*vlan_id;*/
+		mac_entry.in_port_num = mac_cb->mac_id;
+		mac_entry.port_num = port_num;
+
+		if (en == DISABLE)
+			ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+		else
+			ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+		if (ret) {
+			dev_err(dsaf_dev->dev,
+				"set mac mc port failed,%s mac%d ret = %#x!\n",
+				mac_cb->dsaf_dev->ae_dev.name,
+				mac_cb->mac_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * hns_mac_del_mac - delete a mac address from the dsaf table; the same
+ *                   address cannot be deleted twice
+ * @mac_cb: mac device
+ * @vfn: vf id
+ * @mac: mac address
+ * return status
+ */
+int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac)
+{
+	struct mac_entry_idx *old_mac;
+	struct dsaf_device *dsaf_dev;
+	u32 ret;
+
+	dsaf_dev = mac_cb->dsaf_dev;
+
+	if (vfn < DSAF_MAX_VM_NUM) {
+		old_mac = &mac_cb->addr_entry_idx[vfn];
+	} else {
+		dev_err(mac_cb->dev,
+			"vf queue is too large,%s mac%d queue = %#x!\n",
+			mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn);
+		return -EINVAL;
+	}
+
+	if (dsaf_dev) {
+		ret = hns_dsaf_del_mac_entry(dsaf_dev, old_mac->vlan_id,
+					     mac_cb->mac_id, old_mac->addr);
+		if (ret)
+			return ret;
+
+		if (memcmp(old_mac->addr, mac, sizeof(old_mac->addr)) == 0)
+			old_mac->valid = 0;
+	}
+
+	return 0;
+}
+
+static void hns_mac_param_get(struct mac_params *param,
+			      struct hns_mac_cb *mac_cb)
+{
+	param->vaddr = (void *)mac_cb->vaddr;
+	param->mac_mode = hns_get_enet_interface(mac_cb);
+	memcpy(param->addr, mac_cb->addr_entry_idx[0].addr,
+	       MAC_NUM_OCTETS_PER_ADDR);
+	param->mac_id = mac_cb->mac_id;
+	param->dev = mac_cb->dev;
+}
+
+/**
+ * hns_mac_port_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @port_num: port number
+ * @vlan_id: vlan id
+ * @en: enable
+ * return 0 - success, negative - fail
+ */
+static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
+				     u32 port_num, u16 vlan_id, u8 en)
+{
+	int ret;
+	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+	u8 addr[MAC_NUM_OCTETS_PER_ADDR]
+		= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+	struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+	/* directly return ok in debug network mode */
+	if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+		return 0;
+
+	if (dsaf_dev) {
+		memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+		mac_entry.in_vlan_id = vlan_id;
+		mac_entry.in_port_num = mac_cb->mac_id;
+		mac_entry.port_num = port_num;
+
+		if (en == DISABLE)
+			ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+		else
+			ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * hns_mac_vm_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @vmid: vm id
+ * @en: enable
+ * return 0 - success, negative - fail
+ */
+int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, u8 en)
+{
+	int ret;
+	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+	u8 port_num;
+	u8 addr[MAC_NUM_OCTETS_PER_ADDR]
+		= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+	struct mac_entry_idx *uc_mac_entry;
+	struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+	if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+		return 0;
+
+	uc_mac_entry = &mac_cb->addr_entry_idx[vmid];
+
+	if (dsaf_dev)  {
+		memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+		mac_entry.in_vlan_id = uc_mac_entry->vlan_id;
+		mac_entry.in_port_num = mac_cb->mac_id;
+		ret = hns_mac_get_inner_port_num(mac_cb, vmid, &port_num);
+		if (ret)
+			return ret;
+		mac_entry.port_num = port_num;
+
+		if (en == DISABLE)
+			ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+		else
+			ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+		return ret;
+	}
+
+	return 0;
+}
+
+void hns_mac_reset(struct hns_mac_cb *mac_cb)
+{
+	struct mac_driver *drv;
+
+	drv = hns_mac_get_drv(mac_cb);
+
+	drv->mac_init(drv);
+
+	if (drv->config_max_frame_length)
+		drv->config_max_frame_length(drv, mac_cb->max_frm);
+
+	if (drv->set_tx_auto_pause_frames)
+		drv->set_tx_auto_pause_frames(drv, mac_cb->tx_pause_frm_time);
+
+	if (drv->set_an_mode)
+		drv->set_an_mode(drv, 1);
+
+	if (drv->mac_pausefrm_cfg) {
+		if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+			drv->mac_pausefrm_cfg(drv, 0, 0);
+		else /* mac rx pause must stay disabled; dsaf pfc handles it */
+			drv->mac_pausefrm_cfg(drv, 0, 1);
+	}
+}
+
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu)
+{
+	struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+	u32 buf_size = mac_cb->dsaf_dev->buf_size;
+	u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
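+	/* the new frame must fit both the mac limit and the ring buffers */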
+	if ((new_mtu < MAC_MIN_MTU) || (new_frm > MAC_MAX_MTU) ||
+	    (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size))
+		return -EINVAL;
+
+	if (!drv->config_max_frame_length)
+		return -ECHILD;
+
+	/* adjust max frame to be at least the size of a standard frame */
+	if (new_frm < (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN))
+		new_frm = (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN);
+
+	drv->config_max_frame_length(drv, new_frm);
+
+	mac_cb->max_frm = new_frm;
+
+	return 0;
+}
+
+void hns_mac_start(struct hns_mac_cb *mac_cb)
+{
+	struct mac_driver *mac_drv = hns_mac_get_drv(mac_cb);
+
+	/* for virt */
+	if (mac_drv->mac_en_flg == MAC_EN_FLAG_V) {
+		/*plus 1 when the virtual mac has been enabled */
+		mac_drv->virt_dev_num += 1;
+		return;
+	}
+
+	if (mac_drv->mac_enable) {
+		mac_drv->mac_enable(mac_cb->priv.mac, MAC_COMM_MODE_RX_AND_TX);
+		mac_drv->mac_en_flg = MAC_EN_FLAG_V;
+	}
+}
+
+void hns_mac_stop(struct hns_mac_cb *mac_cb)
+{
+	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+	/*modified for virtualization */
+	if (mac_ctrl_drv->virt_dev_num > 0) {
+		mac_ctrl_drv->virt_dev_num -= 1;
+		if (mac_ctrl_drv->virt_dev_num > 0)
+			return;
+	}
+
+	if (mac_ctrl_drv->mac_disable)
+		mac_ctrl_drv->mac_disable(mac_cb->priv.mac,
+			MAC_COMM_MODE_RX_AND_TX);
+
+	mac_ctrl_drv->mac_en_flg = 0;
+	mac_cb->link = 0;
+	cpld_led_reset(mac_cb);
+}
+
+/**
+ * hns_mac_get_autoneg - get autonegotiation state
+ * @mac_cb: mac control block
+ * @auto_neg: autonegotiation state
+ */
+void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg)
+{
+	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+	if (mac_ctrl_drv->autoneg_stat)
+		mac_ctrl_drv->autoneg_stat(mac_ctrl_drv, auto_neg);
+	else
+		*auto_neg = 0;
+}
+
+/**
+ * hns_mac_get_pauseparam - get rx & tx pause parameters
+ * @mac_cb: mac control block
+ * @rx_en: rx enable status
+ * @tx_en: tx enable status
+ */
+void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
+{
+	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+	if (mac_ctrl_drv->get_pause_enable) {
+		mac_ctrl_drv->get_pause_enable(mac_ctrl_drv, rx_en, tx_en);
+	} else {
+		*rx_en = 0;
+		*tx_en = 0;
+	}
+
+	/* Due to a chip defect, rx pause can't be enabled on the service mac
+	 * itself. Always report rx pause as enabled (1), because the DSAF
+	 * handles rx pause frames on behalf of the service mac, so rx pause
+	 * is still effectively supported.
+	 */
+	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+		*rx_en = 1;
+}
+
+/**
+ * hns_mac_set_autoneg - set autonegotiation
+ * @mac_cb: mac control block
+ * @enable: enable or not
+ * return 0 - success, negative - fail
+ */
+int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
+{
+	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+	if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII && enable) {
+		dev_err(mac_cb->dev, "enable autoneg is not allowed!");
+		return -ENOTSUPP;
+	}
+
+	if (mac_ctrl_drv->set_an_mode)
+		mac_ctrl_drv->set_an_mode(mac_ctrl_drv, enable);
+
+	return 0;
+}
+
+/**
+ * hns_mac_set_pauseparam - set rx & tx pause parameters
+ * @mac_cb: mac control block
+ * @rx_en: rx enable or not
+ * @tx_en: tx enable or not
+ * return 0 - success, negative - fail
+ */
+int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
+{
+	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+	if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
+		if (!rx_en) {
+			dev_err(mac_cb->dev, "disable rx_pause is not allowed!");
+			return -EINVAL;
+		}
+	} else if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
+		if (tx_en || rx_en) {
+			dev_err(mac_cb->dev, "enable tx_pause or enable rx_pause are not allowed!");
+			return -EINVAL;
+		}
+	} else {
+		dev_err(mac_cb->dev, "Unsupport this operation!");
+		return -EINVAL;
+	}
+
+	if (mac_ctrl_drv->mac_pausefrm_cfg)
+		mac_ctrl_drv->mac_pausefrm_cfg(mac_ctrl_drv, rx_en, tx_en);
+
+	return 0;
+}
+
+/**
+ * hns_mac_init_ex - mac init
+ * @mac_cb: mac control block
+ * return 0 - success, negative - fail
+ */
+static int hns_mac_init_ex(struct hns_mac_cb *mac_cb)
+{
+	int ret;
+	struct mac_params param;
+	struct mac_driver *drv;
+
+	hns_dsaf_fix_mac_mode(mac_cb);
+
+	memset(&param, 0, sizeof(struct mac_params));
+	hns_mac_param_get(&param, mac_cb);
+
+	if (MAC_SPEED_FROM_MODE(param.mac_mode) < MAC_SPEED_10000)
+		drv = (struct mac_driver *)hns_gmac_config(mac_cb, &param);
+	else
+		drv = (struct mac_driver *)hns_xgmac_config(mac_cb, &param);
+
+	if (!drv)
+		return -ENOMEM;
+
+	mac_cb->priv.mac = (void *)drv;
+	hns_mac_reset(mac_cb);
+
+	hns_mac_adjust_link(mac_cb, mac_cb->speed, !mac_cb->half_duplex);
+
+	ret = hns_mac_port_config_bc_en(mac_cb, mac_cb->mac_id, 0, ENABLE);
+	if (ret)
+		goto free_mac_drv;
+
+	return 0;
+
+free_mac_drv:
+	drv->mac_free(mac_cb->priv.mac);
+	mac_cb->priv.mac = NULL;
+
+	return ret;
+}
+
+/**
+ * hns_mac_get_info - get mac information from device node
+ * @mac_cb: mac device
+ * @np: device node
+ * @mac_mode_idx: mac mode index
+ */
+static void hns_mac_get_info(struct hns_mac_cb *mac_cb,
+			     struct device_node *np, u32 mac_mode_idx)
+{
+	mac_cb->link = false;
+	mac_cb->half_duplex = false;
+	mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
+	mac_cb->max_speed = mac_cb->speed;
+
+	if (mac_cb->phy_if == PHY_INTERFACE_MODE_SGMII) {
+		mac_cb->if_support = MAC_GMAC_SUPPORTED;
+		mac_cb->if_support |= SUPPORTED_1000baseT_Full;
+	} else if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII) {
+		mac_cb->if_support = SUPPORTED_10000baseR_FEC;
+		mac_cb->if_support |= SUPPORTED_10000baseKR_Full;
+	}
+
+	mac_cb->max_frm = MAC_DEFAULT_MTU;
+	mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME;
+
+	/* Get the rest of the PHY information */
+	mac_cb->phy_node = of_parse_phandle(np, "phy-handle", mac_cb->mac_id);
+	if (mac_cb->phy_node)
+		dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
+			mac_cb->mac_id, mac_cb->phy_node->name);
+}
+
+/**
+ * hns_mac_get_mode - get mac mode
+ * @phy_if: phy interface
+ * return 0 - gmac, 1 - xgmac, negative - fail
+ */
+static int hns_mac_get_mode(phy_interface_t phy_if)
+{
+	switch (phy_if) {
+	case PHY_INTERFACE_MODE_SGMII:
+		return MAC_GMAC_IDX;
+	case PHY_INTERFACE_MODE_XGMII:
+		return MAC_XGMAC_IDX;
+	default:
+		return -EINVAL;
+	}
+}
+
+u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
+			      struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
+{
+	u8 __iomem *base = dsaf_dev->io_base;
+	int mac_id = mac_cb->mac_id;
+
+	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+		return base + 0x40000 + mac_id * 0x4000 -
+				mac_mode_idx * 0x20000;
+	else
+		return mac_cb->serdes_vaddr + 0x1000
+			+ (mac_id - DSAF_SERVICE_PORT_NUM_PER_DSAF) * 0x100000;
+}
+
+/**
+ * hns_mac_get_cfg - get mac cfg from dtb or acpi table
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_idx: mac index
+ * return 0 - success, negative - fail
+ */
+int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
+{
+	int ret;
+	u32 mac_mode_idx;
+	struct hns_mac_cb *mac_cb = &dsaf_dev->mac_cb[mac_idx];
+
+	mac_cb->dsaf_dev = dsaf_dev;
+	mac_cb->dev = dsaf_dev->dev;
+	mac_cb->mac_id = mac_idx;
+
+	mac_cb->sys_ctl_vaddr =	dsaf_dev->sc_base;
+	mac_cb->serdes_vaddr = dsaf_dev->sds_base;
+
+	if (dsaf_dev->cpld_base &&
+	    mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+		mac_cb->cpld_vaddr = dsaf_dev->cpld_base +
+			mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET;
+	mac_cb->sfp_prsnt = 0;
+	mac_cb->txpkt_for_led = 0;
+	mac_cb->rxpkt_for_led = 0;
+
+	if (mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+		mac_cb->mac_type = HNAE_PORT_SERVICE;
+	else
+		mac_cb->mac_type = HNAE_PORT_DEBUG;
+
+	mac_cb->phy_if = hns_mac_get_phy_if(mac_cb);
+
+	ret = hns_mac_get_mode(mac_cb->phy_if);
+	if (ret < 0) {
+		dev_err(dsaf_dev->dev,
+			"hns_mac_get_mode failed,mac%d ret = %#x!\n",
+			mac_cb->mac_id, ret);
+		return ret;
+	}
+	mac_mode_idx = (u32)ret;
+
+	hns_mac_get_info(mac_cb, mac_cb->dev->of_node, mac_mode_idx);
+
+	mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
+
+	return 0;
+}
+
+/**
+ * hns_mac_init - init mac
+ * @dsaf_dev: dsa fabric device struct pointer
+ * return 0 - success, negative - fail
+ */
+int hns_mac_init(struct dsaf_device *dsaf_dev)
+{
+	int i;
+	int ret;
+	size_t size;
+	struct hns_mac_cb *mac_cb;
+
+	size = sizeof(struct hns_mac_cb) * DSAF_MAX_PORT_NUM_PER_CHIP;
+	dsaf_dev->mac_cb = devm_kzalloc(dsaf_dev->dev, size, GFP_KERNEL);
+	if (!dsaf_dev->mac_cb)
+		return -ENOMEM;
+
+	for (i = 0; i < DSAF_MAX_PORT_NUM_PER_CHIP; i++) {
+		ret = hns_mac_get_cfg(dsaf_dev, i);
+		if (ret)
+			goto free_mac_cb;
+
+		mac_cb = &dsaf_dev->mac_cb[i];
+		ret = hns_mac_init_ex(mac_cb);
+		if (ret)
+			goto free_mac_cb;
+	}
+
+	return 0;
+
+free_mac_cb:
+	dsaf_dev->mac_cb = NULL;
+
+	return ret;
+}
+
+void hns_mac_uninit(struct dsaf_device *dsaf_dev)
+{
+	cpld_led_reset(dsaf_dev->mac_cb);
+	dsaf_dev->mac_cb = NULL;
+}
+
+int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
+				enum hnae_loop loop, int en)
+{
+	int ret;
+	struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+
+	if (drv->config_loopback)
+		ret = drv->config_loopback(drv, loop, en);
+	else
+		ret = -ENOTSUPP;
+
+	return ret;
+}
+
+void hns_mac_update_stats(struct hns_mac_cb *mac_cb)
+{
+	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+	mac_ctrl_drv->update_stats(mac_ctrl_drv);
+}
+
+void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data)
+{
+	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+	mac_ctrl_drv->get_ethtool_stats(mac_ctrl_drv, data);
+}
+
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb,
+			 int stringset, u8 *data)
+{
+	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+	mac_ctrl_drv->get_strings(stringset, data);
+}
+
+int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset)
+{
+	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+	return mac_ctrl_drv->get_sset_count(stringset);
+}
+
+int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb)
+{
+	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+	return mac_ctrl_drv->get_regs_count();
+}
+
+void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data)
+{
+	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+	mac_ctrl_drv->get_regs(mac_ctrl_drv, data);
+}
+
+void hns_set_led_opt(struct hns_mac_cb *mac_cb)
+{
+	int nic_data = 0;
+	int txpkts, rxpkts;
+
+	txpkts = mac_cb->txpkt_for_led - mac_cb->hw_stats.tx_good_pkts;
+	rxpkts = mac_cb->rxpkt_for_led - mac_cb->hw_stats.rx_good_pkts;
+	if (txpkts || rxpkts)
+		nic_data = 1;
+	else
+		nic_data = 0;
+	mac_cb->txpkt_for_led = mac_cb->hw_stats.tx_good_pkts;
+	mac_cb->rxpkt_for_led = mac_cb->hw_stats.rx_good_pkts;
+	hns_cpld_set_led(mac_cb, (int)mac_cb->link,
+			 mac_cb->speed, nic_data);
+}
+
+int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
+			enum hnae_led_state status)
+{
+	if (!mac_cb || !mac_cb->cpld_vaddr)
+		return 0;
+
+	return cpld_set_led_id(mac_cb, status);
+}
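
Note on the MTU handling above: hns_mac_set_mtu() folds the Ethernet header, FCS and one VLAN tag into the frame length that is actually programmed into the MAC, and never programs less than a standard-sized frame. A minimal stand-alone sketch of that arithmetic follows; RING_LIMIT and the program itself are illustrative assumptions, not driver constants.

	/* Illustrative only: mirrors the MTU -> max-frame-length math used by
	 * hns_mac_set_mtu(); RING_LIMIT is an assumed stand-in for the driver's
	 * HNS_RCB_RING_MAX_BD_PER_PKT * buf_size bound.
	 */
	#include <stdio.h>

	#define ETH_HLEN	14	/* Ethernet header */
	#define ETH_FCS_LEN	4	/* frame check sequence */
	#define VLAN_HLEN	4	/* one 802.1Q tag */
	#define ETH_FRAME_LEN	1514	/* standard maximum frame, without FCS */

	#define MAC_MIN_MTU	68
	#define MAC_MAX_MTU	9600
	#define RING_LIMIT	(8 * 2048)	/* assumption, not a driver constant */

	static int frame_len_for_mtu(unsigned int new_mtu, unsigned int *new_frm)
	{
		unsigned int frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

		if (new_mtu < MAC_MIN_MTU || frm > MAC_MAX_MTU || frm > RING_LIMIT)
			return -1;

		/* never program less than a standard-sized frame */
		if (frm < ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
			frm = ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN;

		*new_frm = frm;
		return 0;
	}

	int main(void)
	{
		unsigned int frm;

		if (!frame_len_for_mtu(1500, &frm))
			printf("MTU 1500 -> max frame %u bytes\n", frm);	/* 1522 */
		return 0;
	}

With the default 1500-byte MTU this yields the familiar 1522-byte maximum frame, which is the same value MAC_DEFAULT_MTU expands to in hns_dsaf_mac.h below.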
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
new file mode 100644
index 0000000..7da95a7
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_MAC_H
+#define _HNS_DSAF_MAC_H
+
+#include <linux/phy.h>
+#include <linux/kernel.h>
+#include <linux/if_vlan.h>
+#include "hns_dsaf_main.h"
+
+struct dsaf_device;
+
+#define MAC_GMAC_SUPPORTED \
+	(SUPPORTED_10baseT_Half \
+	| SUPPORTED_10baseT_Full \
+	| SUPPORTED_100baseT_Half \
+	| SUPPORTED_100baseT_Full \
+	| SUPPORTED_Autoneg)
+
+#define MAC_DEFAULT_MTU	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+#define MAC_MAX_MTU		9600
+#define MAC_MIN_MTU		68
+
+#define MAC_DEFAULT_PAUSE_TIME 0xff
+
+#define MAC_GMAC_IDX 0
+#define MAC_XGMAC_IDX 1
+
+#define ETH_STATIC_REG	 1
+#define ETH_DUMP_REG	 5
+/* check mac addr broadcast */
+#define MAC_IS_BROADCAST(p)	((*(p) == 0xff) && (*((p) + 1) == 0xff) && \
+		(*((p) + 2) == 0xff) &&  (*((p) + 3) == 0xff)  && \
+		(*((p) + 4) == 0xff) && (*((p) + 5) == 0xff))
+
+/* check mac addr is 01-00-5e-xx-xx-xx*/
+#define MAC_IS_L3_MULTICAST(p) ((*((p) + 0) == 0x01) && \
+			(*((p) + 1) == 0x00)   && \
+			(*((p) + 2) == 0x5e))
+
+/*check the mac addr is 0 in all bit*/
+#define MAC_IS_ALL_ZEROS(p)   ((*(p) == 0) && (*((p) + 1) == 0) && \
+	(*((p) + 2) == 0) && (*((p) + 3) == 0) && \
+	(*((p) + 4) == 0) && (*((p) + 5) == 0))
+
+/*check mac addr multicast*/
+#define MAC_IS_MULTICAST(p)	((*((u8 *)((p) + 0)) & 0x01) ? (1) : (0))
+
+/**< Number of octets (8-bit bytes) in an ethernet address */
+#define MAC_NUM_OCTETS_PER_ADDR 6
+
+struct mac_priv {
+	void *mac;
+};
+
+/* net speed */
+enum mac_speed {
+	MAC_SPEED_10	= 10,	   /**< 10 Mbps */
+	MAC_SPEED_100	= 100,	  /**< 100 Mbps */
+	MAC_SPEED_1000  = 1000,	 /**< 1000 Mbps = 1 Gbps */
+	MAC_SPEED_10000 = 10000	 /**< 10000 Mbps = 10 Gbps */
+};
+
+/*mac interface keyword	*/
+enum mac_intf {
+	MAC_IF_NONE  = 0x00000000,   /**< no interface (invalid) */
+	MAC_IF_MII   = 0x00010000,   /**< MII interface */
+	MAC_IF_RMII  = 0x00020000,   /**< RMII interface */
+	MAC_IF_SMII  = 0x00030000,   /**< SMII interface */
+	MAC_IF_GMII  = 0x00040000,   /**< GMII interface */
+	MAC_IF_RGMII = 0x00050000,   /**< RGMII interface */
+	MAC_IF_TBI   = 0x00060000,   /**< TBI interface */
+	MAC_IF_RTBI  = 0x00070000,   /**< RTBI interface */
+	MAC_IF_SGMII = 0x00080000,   /**< SGMII interface */
+	MAC_IF_XGMII = 0x00090000,   /**< XGMII interface */
+	MAC_IF_QSGMII = 0x000a0000	/**< QSGMII interface */
+};
+
+/*mac mode */
+enum mac_mode {
+	/**< Invalid Ethernet mode */
+	MAC_MODE_INVALID	 = 0,
+	/**<	10 Mbps MII   */
+	MAC_MODE_MII_10	  = (MAC_IF_MII   | MAC_SPEED_10),
+	/**<   100 Mbps MII   */
+	MAC_MODE_MII_100	 = (MAC_IF_MII   | MAC_SPEED_100),
+	/**<	10 Mbps RMII  */
+	MAC_MODE_RMII_10	 = (MAC_IF_RMII  | MAC_SPEED_10),
+	/**<   100 Mbps RMII  */
+	MAC_MODE_RMII_100	= (MAC_IF_RMII  | MAC_SPEED_100),
+	/**<	10 Mbps SMII  */
+	MAC_MODE_SMII_10	 = (MAC_IF_SMII  | MAC_SPEED_10),
+	/**<   100 Mbps SMII  */
+	MAC_MODE_SMII_100	= (MAC_IF_SMII  | MAC_SPEED_100),
+	/**<  1000 Mbps GMII  */
+	MAC_MODE_GMII_1000   = (MAC_IF_GMII  | MAC_SPEED_1000),
+	/**<	10 Mbps RGMII */
+	MAC_MODE_RGMII_10	= (MAC_IF_RGMII | MAC_SPEED_10),
+	/**<   100 Mbps RGMII */
+	MAC_MODE_RGMII_100   = (MAC_IF_RGMII | MAC_SPEED_100),
+	/**<  1000 Mbps RGMII */
+	MAC_MODE_RGMII_1000  = (MAC_IF_RGMII | MAC_SPEED_1000),
+	/**<  1000 Mbps TBI   */
+	MAC_MODE_TBI_1000	= (MAC_IF_TBI   | MAC_SPEED_1000),
+	/**<  1000 Mbps RTBI  */
+	MAC_MODE_RTBI_1000   = (MAC_IF_RTBI  | MAC_SPEED_1000),
+	/**<	10 Mbps SGMII */
+	MAC_MODE_SGMII_10	= (MAC_IF_SGMII | MAC_SPEED_10),
+	/**<   100 Mbps SGMII */
+	MAC_MODE_SGMII_100   = (MAC_IF_SGMII | MAC_SPEED_100),
+	/**<  1000 Mbps SGMII */
+	MAC_MODE_SGMII_1000  = (MAC_IF_SGMII | MAC_SPEED_1000),
+	/**< 10000 Mbps XGMII */
+	MAC_MODE_XGMII_10000 = (MAC_IF_XGMII | MAC_SPEED_10000),
+	/**<  1000 Mbps QSGMII */
+	MAC_MODE_QSGMII_1000 = (MAC_IF_QSGMII | MAC_SPEED_1000)
+};
+
+/*mac communicate mode*/
+enum mac_commom_mode {
+	MAC_COMM_MODE_NONE	  = 0, /**< No transmit/receive communication */
+	MAC_COMM_MODE_RX		= 1, /**< Only receive communication */
+	MAC_COMM_MODE_TX		= 2, /**< Only transmit communication */
+	MAC_COMM_MODE_RX_AND_TX = 3  /**< Both tx and rx communication */
+};
+
+/*mac statistics */
+struct mac_statistics {
+	u64  stat_pkts64; /* r-10G tr-DT 64 byte frame counter */
+	u64  stat_pkts65to127; /* r-10G 65 to 127 byte frame counter */
+	u64  stat_pkts128to255; /* r-10G 128 to 255 byte frame counter */
+	u64  stat_pkts256to511; /*r-10G 256 to 511 byte frame counter */
+	u64  stat_pkts512to1023;/* r-10G 512 to 1023 byte frame counter */
+	u64  stat_pkts1024to1518; /* r-10G 1024 to 1518 byte frame counter */
+	u64  stat_pkts1519to1522; /* r-10G 1519 to 1522 byte good frame count*/
+	/* Total number of packets that were less than 64 octets */
+	/*			long with a wrong CRC.*/
+	u64  stat_fragments;
+	/* Total number of packets longer than valid maximum length octets */
+	u64  stat_jabbers;
+	/* number of dropped packets due to internal errors of */
+	/*			the MAC Client. */
+	u64  stat_drop_events;
+	/* Incremented when frames of correct length but with */
+	/*			CRC error are received.*/
+	u64  stat_crc_align_errors;
+	/* Total number of packets that were less than 64 octets */
+	/*			long with a good CRC.*/
+	u64  stat_undersize_pkts;
+	u64  stat_oversize_pkts;  /**< T,B.D*/
+
+	u64  stat_rx_pause;		   /**< Pause MAC Control received */
+	u64  stat_tx_pause;		   /**< Pause MAC Control sent */
+
+	u64  in_octets;		/**< Total number of byte received. */
+	u64  in_pkts;		/* Total number of packets received.*/
+	u64  in_mcast_pkts;	/* Total number of multicast frame received */
+	u64  in_bcast_pkts;	/* Total number of broadcast frame received */
+				/* Frames received, but discarded due to */
+				/* problems within the MAC RX. */
+	u64  in_discards;
+	u64  in_errors;		/* Number of frames received with error: */
+				/*	- FIFO Overflow Error */
+				/*	- CRC Error */
+				/*	- Frame Too Long Error */
+				/*	- Alignment Error */
+	u64  out_octets; /*Total number of byte sent. */
+	u64  out_pkts;	/**< Total number of packets sent .*/
+	u64  out_mcast_pkts; /* Total number of multicast frame sent */
+	u64  out_bcast_pkts; /* Total number of broadcast frames sent */
+	/* Frames received, but discarded due to problems within */
+	/*			the MAC TX N/A!.*/
+	u64  out_discards;
+	u64  out_errors;	/*Number of frames transmitted with error: */
+			/*	- FIFO Overflow Error */
+			/*	- FIFO Underflow Error */
+			/*	 - Other */
+};
+
+/* mac parameter struct; the mac gets its params from nic or dsaf at init time */
+struct mac_params {
+	char addr[MAC_NUM_OCTETS_PER_ADDR];
+	void *vaddr; /*virtual address*/
+	struct device *dev;
+	u8 mac_id;
+	/**< Ethernet operation mode (MAC-PHY interface and speed) */
+	enum mac_mode mac_mode;
+};
+
+struct mac_info {
+	u16 speed;	/* The forced speed (lower bits) in Mbps.
+			 * Please use ethtool_cmd_speed()/_set()
+			 * to access it.
+			 */
+	u8 duplex;		/* Duplex, half or full */
+	u8 auto_neg;	/* Enable or disable autonegotiation */
+	enum hnae_loop loop_mode;
+	u8 tx_pause_en;
+	u8 tx_pause_time;
+	u8 rx_pause_en;
+	u8 pad_and_crc_en;
+	u8 promiscuous_en;
+	u8 port_en;	 /*port enable*/
+};
+
+struct mac_entry_idx {
+	u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+	u16 vlan_id:12;
+	u16 valid:1;
+	u16 qos:3;
+};
+
+struct mac_hw_stats {
+	u64 rx_good_pkts;	/* only for xgmac */
+	u64 rx_good_bytes;
+	u64 rx_total_pkts;	/* only for xgmac */
+	u64 rx_total_bytes;	/* only for xgmac */
+	u64 rx_bad_bytes;	/* only for gmac */
+	u64 rx_uc_pkts;
+	u64 rx_mc_pkts;
+	u64 rx_bc_pkts;
+	u64 rx_fragment_err;	/* only for xgmac */
+	u64 rx_undersize;	/* only for xgmac */
+	u64 rx_under_min;
+	u64 rx_minto64;		/* only for gmac */
+	u64 rx_64bytes;
+	u64 rx_65to127;
+	u64 rx_128to255;
+	u64 rx_256to511;
+	u64 rx_512to1023;
+	u64 rx_1024to1518;
+	u64 rx_1519tomax;
+	u64 rx_1519tomax_good;	/* only for xgmac */
+	u64 rx_oversize;
+	u64 rx_jabber_err;
+	u64 rx_fcs_err;
+	u64 rx_vlan_pkts;	/* only for gmac */
+	u64 rx_data_err;	/* only for gmac */
+	u64 rx_align_err;	/* only for gmac */
+	u64 rx_long_err;	/* only for gmac */
+	u64 rx_pfc_tc0;
+	u64 rx_pfc_tc1;		/* only for xgmac */
+	u64 rx_pfc_tc2;		/* only for xgmac */
+	u64 rx_pfc_tc3;		/* only for xgmac */
+	u64 rx_pfc_tc4;		/* only for xgmac */
+	u64 rx_pfc_tc5;		/* only for xgmac */
+	u64 rx_pfc_tc6;		/* only for xgmac */
+	u64 rx_pfc_tc7;		/* only for xgmac */
+	u64 rx_unknown_ctrl;
+	u64 rx_filter_pkts;	/* only for gmac */
+	u64 rx_filter_bytes;	/* only for gmac */
+	u64 rx_fifo_overrun_err;/* only for gmac */
+	u64 rx_len_err;		/* only for gmac */
+	u64 rx_comma_err;	/* only for gmac */
+	u64 rx_symbol_err;	/* only for xgmac */
+	u64 tx_good_to_sw;	/* only for xgmac */
+	u64 tx_bad_to_sw;	/* only for xgmac */
+	u64 rx_1731_pkts;	/* only for xgmac */
+
+	u64 tx_good_bytes;
+	u64 tx_good_pkts;	/* only for xgmac */
+	u64 tx_total_bytes;	/* only for xgmac */
+	u64 tx_total_pkts;	/* only for xgmac */
+	u64 tx_bad_bytes;	/* only for gmac */
+	u64 tx_bad_pkts;	/* only for xgmac */
+	u64 tx_uc_pkts;
+	u64 tx_mc_pkts;
+	u64 tx_bc_pkts;
+	u64 tx_undersize;	/* only for xgmac */
+	u64 tx_fragment_err;	/* only for xgmac */
+	u64 tx_under_min_pkts;	/* only for gmac */
+	u64 tx_64bytes;
+	u64 tx_65to127;
+	u64 tx_128to255;
+	u64 tx_256to511;
+	u64 tx_512to1023;
+	u64 tx_1024to1518;
+	u64 tx_1519tomax;
+	u64 tx_1519tomax_good;	/* only for xgmac */
+	u64 tx_oversize;	/* only for xgmac */
+	u64 tx_jabber_err;
+	u64 tx_underrun_err;	/* only for gmac */
+	u64 tx_vlan;		/* only for gmac */
+	u64 tx_crc_err;		/* only for gmac */
+	u64 tx_pfc_tc0;
+	u64 tx_pfc_tc1;		/* only for xgmac */
+	u64 tx_pfc_tc2;		/* only for xgmac */
+	u64 tx_pfc_tc3;		/* only for xgmac */
+	u64 tx_pfc_tc4;		/* only for xgmac */
+	u64 tx_pfc_tc5;		/* only for xgmac */
+	u64 tx_pfc_tc6;		/* only for xgmac */
+	u64 tx_pfc_tc7;		/* only for xgmac */
+	u64 tx_ctrl;		/* only for xgmac */
+	u64 tx_1731_pkts;	/* only for xgmac */
+	u64 tx_1588_pkts;	/* only for xgmac */
+	u64 rx_good_from_sw;	/* only for xgmac */
+	u64 rx_bad_from_sw;	/* only for xgmac */
+};
+
+struct hns_mac_cb {
+	struct device *dev;
+	struct dsaf_device *dsaf_dev;
+	struct mac_priv priv;
+	u8 __iomem *vaddr;
+	u8 __iomem *cpld_vaddr;
+	u8 __iomem *sys_ctl_vaddr;
+	u8 __iomem *serdes_vaddr;
+	struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM];
+	u8 sfp_prsnt;
+	u8 cpld_led_value;
+	u8 mac_id;
+
+	u8 link;
+	u8 half_duplex;
+	u16 speed;
+	u16 max_speed;
+	u16 max_frm;
+	u16 tx_pause_frm_time;
+	u32 if_support;
+	u64 txpkt_for_led;
+	u64 rxpkt_for_led;
+	enum hnae_port_type mac_type;
+	phy_interface_t phy_if;
+	enum hnae_loop loop_mode;
+
+	struct device_node *phy_node;
+
+	struct mac_hw_stats hw_stats;
+};
+
+struct mac_driver {
+	/*init Mac when init nic or dsaf*/
+	void (*mac_init)(void *mac_drv);
+	/*remove mac when remove nic or dsaf*/
+	void (*mac_free)(void *mac_drv);
+	/*enable mac when enable nic or dsaf*/
+	void (*mac_enable)(void *mac_drv, enum mac_commom_mode mode);
+	/*disable mac when disable nic or dsaf*/
+	void (*mac_disable)(void *mac_drv, enum mac_commom_mode mode);
+	/* config mac address*/
+	void (*set_mac_addr)(void *mac_drv, char *mac_addr);
+	/*adjust mac mode of port,include speed and duplex*/
+	int (*adjust_link)(void *mac_drv, enum mac_speed speed,
+			   u32 full_duplex);
+	/* config autonegotiation mode of port */
+	void (*set_an_mode)(void *mac_drv, u8 enable);
+	/* config loopback mode */
+	int (*config_loopback)(void *mac_drv, enum hnae_loop loop_mode,
+			       u8 enable);
+	/* config mtu*/
+	void (*config_max_frame_length)(void *mac_drv, u16 newval);
+	/*config PAD and CRC enable */
+	void (*config_pad_and_crc)(void *mac_drv, u8 newval);
+	/* config duplex mode*/
+	void (*config_half_duplex)(void *mac_drv, u8 newval);
+	/* config tx pause time; if pause_time is zero, tx pause is disabled */
+	void (*set_tx_auto_pause_frames)(void *mac_drv, u16 pause_time);
+	/*config rx pause enable*/
+	void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable);
+	/* config rx mode for promiscuous*/
+	int (*set_promiscuous)(void *mac_drv, u8 enable);
+	/* get mac id */
+	void (*mac_get_id)(void *mac_drv, u8 *mac_id);
+	void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
+
+	void (*autoneg_stat)(void *mac_drv, u32 *enable);
+	int (*set_pause_enable)(void *mac_drv, u32 rx_en, u32 tx_en);
+	void (*get_pause_enable)(void *mac_drv, u32 *rx_en, u32 *tx_en);
+	void (*get_link_status)(void *mac_drv, u32 *link_stat);
+	/* get the important regs */
+	void (*get_regs)(void *mac_drv, void *data);
+	int (*get_regs_count)(void);
+	/* get strings name for ethtool statistic */
+	void (*get_strings)(u32 stringset, u8 *data);
+	/* get the number of strings*/
+	int (*get_sset_count)(int stringset);
+
+	/* get the statistic by ethtools*/
+	void (*get_ethtool_stats)(void *mac_drv, u64 *data);
+
+	/* get mac information */
+	void (*get_info)(void *mac_drv, struct mac_info *mac_info);
+
+	void (*update_stats)(void *mac_drv);
+
+	enum mac_mode mac_mode;
+	u8 mac_id;
+	struct hns_mac_cb *mac_cb;
+	void __iomem *io_base;
+	unsigned int mac_en_flg;	/* do not enable the mac twice */
+	unsigned int virt_dev_num;
+	struct device *dev;
+};
+
+struct mac_stats_string {
+	char desc[64];
+	unsigned long offset;
+};
+
+#define MAC_MAKE_MODE(interface, speed) (enum mac_mode)((interface) | (speed))
+#define MAC_INTERFACE_FROM_MODE(mode) (enum mac_intf)((mode) & 0xFFFF0000)
+#define MAC_SPEED_FROM_MODE(mode) (enum mac_speed)((mode) & 0x0000FFFF)
+#define MAC_STATS_FIELD_OFF(field) (offsetof(struct mac_hw_stats, field))
+
+static inline struct mac_driver *hns_mac_get_drv(
+	const struct hns_mac_cb *mac_cb)
+{
+	return (struct mac_driver *)(mac_cb->priv.mac);
+}
+
+void *hns_gmac_config(struct hns_mac_cb *mac_cb,
+		      struct mac_params *mac_param);
+void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
+		       struct mac_params *mac_param);
+
+int hns_mac_init(struct dsaf_device *dsaf_dev);
+void mac_adjust_link(struct net_device *net_dev);
+void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
+int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
+		      u32 port_num, char *addr, u8 en);
+int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, u8 en);
+void hns_mac_start(struct hns_mac_cb *mac_cb);
+void hns_mac_stop(struct hns_mac_cb *mac_cb);
+int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac);
+void hns_mac_uninit(struct dsaf_device *dsaf_dev);
+void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
+void hns_mac_reset(struct hns_mac_cb *mac_cb);
+void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg);
+void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en);
+int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable);
+int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu);
+int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
+			  u8 *auto_neg, u16 *speed, u8 *duplex);
+phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb);
+int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en);
+int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
+				enum hnae_loop loop, int en);
+void hns_mac_update_stats(struct hns_mac_cb *mac_cb);
+void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data);
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 *data);
+int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset);
+void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data);
+int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb);
+void hns_set_led_opt(struct hns_mac_cb *mac_cb);
+int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
+			enum hnae_led_state status);
+#endif /* _HNS_DSAF_MAC_H */
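
Note on the mode encoding above: hns_dsaf_mac.h packs the interface keyword into bits 31..16 and the speed in Mbps into bits 15..0; MAC_MAKE_MODE(), MAC_INTERFACE_FROM_MODE() and MAC_SPEED_FROM_MODE() compose and split that value, and hns_mac_init_ex() uses the speed half to pick the GMAC or XGMAC driver. A small stand-alone sketch of the encoding follows; the enum values are copied from the header, the demo program itself is only illustrative.

	/* Illustrative only: stand-alone copy of the mode encoding declared in
	 * hns_dsaf_mac.h (interface keyword in bits 31..16, speed in bits 15..0).
	 */
	#include <stdio.h>

	enum mac_intf  { MAC_IF_SGMII = 0x00080000, MAC_IF_XGMII = 0x00090000 };
	enum mac_speed { MAC_SPEED_1000 = 1000, MAC_SPEED_10000 = 10000 };

	#define MAC_MAKE_MODE(interface, speed)	((interface) | (speed))
	#define MAC_INTERFACE_FROM_MODE(mode)	((mode) & 0xFFFF0000)
	#define MAC_SPEED_FROM_MODE(mode)	((mode) & 0x0000FFFF)

	int main(void)
	{
		unsigned int mode = MAC_MAKE_MODE(MAC_IF_XGMII, MAC_SPEED_10000);

		/* hns_mac_init_ex() picks the GMAC driver below 10 Gbit/s and
		 * the XGMAC driver otherwise, based on exactly this speed field.
		 */
		if (MAC_SPEED_FROM_MODE(mode) < MAC_SPEED_10000)
			printf("mode %#x -> gmac\n", mode);
		else
			printf("mode %#x -> xgmac\n", mode);

		printf("interface bits: %#x\n", MAC_INTERFACE_FROM_MODE(mode));
		return 0;
	}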
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
new file mode 100644
index 0000000..26ae6c6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -0,0 +1,2445 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/device.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_rcb.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_mac.h"
+
+const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
+	[DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
+	[DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
+	[DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
+};
+
+int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
+{
+	int ret, i;
+	u32 desc_num;
+	u32 buf_size;
+	const char *name, *mode_str;
+	struct device_node *np = dsaf_dev->dev->of_node;
+
+	if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v2"))
+		dsaf_dev->dsaf_ver = AE_VERSION_2;
+	else
+		dsaf_dev->dsaf_ver = AE_VERSION_1;
+
+	ret = of_property_read_string(np, "dsa_name", &name);
+	if (ret) {
+		dev_err(dsaf_dev->dev, "get dsaf name fail, ret=%d!\n", ret);
+		return ret;
+	}
+	strncpy(dsaf_dev->ae_dev.name, name, AE_NAME_SIZE);
+	dsaf_dev->ae_dev.name[AE_NAME_SIZE - 1] = '\0';
+
+	ret = of_property_read_string(np, "mode", &mode_str);
+	if (ret) {
+		dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
+		return ret;
+	}
+	for (i = 0; i < DSAF_MODE_MAX; i++) {
+		if (g_dsaf_mode_match[i] &&
+		    !strcmp(mode_str, g_dsaf_mode_match[i]))
+			break;
+	}
+	if (i >= DSAF_MODE_MAX ||
+	    i == DSAF_MODE_INVALID || i == DSAF_MODE_ENABLE) {
+		dev_err(dsaf_dev->dev,
+			"%s prs mode str fail!\n", dsaf_dev->ae_dev.name);
+		return -EINVAL;
+	}
+	dsaf_dev->dsaf_mode = (enum dsaf_mode)i;
+
+	if (dsaf_dev->dsaf_mode > DSAF_MODE_ENABLE)
+		dsaf_dev->dsaf_en = HRD_DSAF_NO_DSAF_MODE;
+	else
+		dsaf_dev->dsaf_en = HRD_DSAF_MODE;
+
+	if ((i == DSAF_MODE_ENABLE_16VM) ||
+	    (i == DSAF_MODE_DISABLE_2PORT_8VM) ||
+	    (i == DSAF_MODE_DISABLE_6PORT_2VM))
+		dsaf_dev->dsaf_tc_mode = HRD_DSAF_8TC_MODE;
+	else
+		dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
+
+	dsaf_dev->sc_base = of_iomap(np, 0);
+	if (!dsaf_dev->sc_base) {
+		dev_err(dsaf_dev->dev,
+			"%s of_iomap 0 fail!\n", dsaf_dev->ae_dev.name);
+		ret = -ENOMEM;
+		goto unmap_base_addr;
+	}
+
+	dsaf_dev->sds_base = of_iomap(np, 1);
+	if (!dsaf_dev->sds_base) {
+		dev_err(dsaf_dev->dev,
+			"%s of_iomap 1 fail!\n", dsaf_dev->ae_dev.name);
+		ret = -ENOMEM;
+		goto unmap_base_addr;
+	}
+
+	dsaf_dev->ppe_base = of_iomap(np, 2);
+	if (!dsaf_dev->ppe_base) {
+		dev_err(dsaf_dev->dev,
+			"%s of_iomap 2 fail!\n", dsaf_dev->ae_dev.name);
+		ret = -ENOMEM;
+		goto unmap_base_addr;
+	}
+
+	dsaf_dev->io_base = of_iomap(np, 3);
+	if (!dsaf_dev->io_base) {
+		dev_err(dsaf_dev->dev,
+			"%s of_iomap 3 fail!\n", dsaf_dev->ae_dev.name);
+		ret = -ENOMEM;
+		goto unmap_base_addr;
+	}
+
+	dsaf_dev->cpld_base = of_iomap(np, 4);
+	if (!dsaf_dev->cpld_base)
+		dev_dbg(dsaf_dev->dev, "NO CPLD ADDR");
+
+	ret = of_property_read_u32(np, "desc-num", &desc_num);
+	if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
+	    desc_num > HNS_DSAF_MAX_DESC_CNT) {
+		dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n",
+			desc_num, ret);
+		goto unmap_base_addr;
+	}
+	dsaf_dev->desc_num = desc_num;
+
+	ret = of_property_read_u32(np, "buf-size", &buf_size);
+	if (ret < 0) {
+		dev_err(dsaf_dev->dev,
+			"get buf-size fail, ret=%d!\r\n", ret);
+		goto unmap_base_addr;
+	}
+	dsaf_dev->buf_size = buf_size;
+
+	dsaf_dev->buf_size_type = hns_rcb_buf_size2type(buf_size);
+	if (dsaf_dev->buf_size_type < 0) {
+		dev_err(dsaf_dev->dev,
+			"buf_size(%d) is wrong!\n", buf_size);
+		ret = -EINVAL;
+		goto unmap_base_addr;
+	}
+
+	if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64ULL)))
+		dev_dbg(dsaf_dev->dev, "set mask to 64bit\n");
+	else
+		dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n");
+
+	return 0;
+
+unmap_base_addr:
+	if (dsaf_dev->io_base)
+		iounmap(dsaf_dev->io_base);
+	if (dsaf_dev->ppe_base)
+		iounmap(dsaf_dev->ppe_base);
+	if (dsaf_dev->sds_base)
+		iounmap(dsaf_dev->sds_base);
+	if (dsaf_dev->sc_base)
+		iounmap(dsaf_dev->sc_base);
+	if (dsaf_dev->cpld_base)
+		iounmap(dsaf_dev->cpld_base);
+	return ret;
+}
+
+static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev)
+{
+	if (dsaf_dev->io_base)
+		iounmap(dsaf_dev->io_base);
+
+	if (dsaf_dev->ppe_base)
+		iounmap(dsaf_dev->ppe_base);
+
+	if (dsaf_dev->sds_base)
+		iounmap(dsaf_dev->sds_base);
+
+	if (dsaf_dev->sc_base)
+		iounmap(dsaf_dev->sc_base);
+
+	if (dsaf_dev->cpld_base)
+		iounmap(dsaf_dev->cpld_base);
+}
+
+/**
+ * hns_dsaf_sbm_link_sram_init_en - config dsaf_sbm_init_en
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_sbm_link_sram_init_en(struct dsaf_device *dsaf_dev)
+{
+	dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_SBM_INIT_S, 1);
+}
+
+/**
+ * hns_dsaf_reg_cnt_clr_ce - config the cnt-clr-ce bit
+ * @dsaf_dev: dsa fabric device
+ * @reg_cnt_clr_ce: config value
+ */
+static void
+hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce)
+{
+	dsaf_set_dev_bit(dsaf_dev, DSAF_DSA_REG_CNT_CLR_CE_REG,
+			 DSAF_CNT_CLR_CE_S, reg_cnt_clr_ce);
+}
+
+/**
+ * hns_dsaf_ppe_qid_cfg - config ppe qid
+ * @dsaf_dev: dsa fabric device
+ * @qid_cfg: qid config value
+ */
+static void
+hns_dsaf_ppe_qid_cfg(struct dsaf_device *dsaf_dev, u32 qid_cfg)
+{
+	u32 i;
+
+	for (i = 0; i < DSAF_COMM_CHN; i++) {
+		dsaf_set_dev_field(dsaf_dev,
+				   DSAF_PPE_QID_CFG_0_REG + 0x0004 * i,
+				   DSAF_PPE_QID_CFG_M, DSAF_PPE_QID_CFG_S,
+				   qid_cfg);
+	}
+}
+
+/**
+ * hns_dsaf_sw_port_type_cfg - cfg sw port type
+ * @dsaf_dev: dsa fabric device
+ * @port_type: switch port type
+ */
+static void hns_dsaf_sw_port_type_cfg(struct dsaf_device *dsaf_dev,
+				      enum dsaf_sw_port_type port_type)
+{
+	u32 i;
+
+	for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
+		dsaf_set_dev_field(dsaf_dev,
+				   DSAF_SW_PORT_TYPE_0_REG + 0x0004 * i,
+				   DSAF_SW_PORT_TYPE_M, DSAF_SW_PORT_TYPE_S,
+				   port_type);
+	}
+}
+
+/**
+ * hns_dsaf_stp_port_type_cfg - cfg stp port type
+ * @dsaf_dev: dsa fabric device
+ * @port_type: stp port type
+ */
+static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev,
+				       enum dsaf_stp_port_type port_type)
+{
+	u32 i;
+
+	for (i = 0; i < DSAF_COMM_CHN; i++) {
+		dsaf_set_dev_field(dsaf_dev,
+				   DSAF_STP_PORT_TYPE_0_REG + 0x0004 * i,
+				   DSAF_STP_PORT_TYPE_M, DSAF_STP_PORT_TYPE_S,
+				   port_type);
+	}
+}
+
+/**
+ * hns_dsaf_sbm_cfg - config sbm
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev)
+{
+	u32 o_sbm_cfg;
+	u32 i;
+
+	for (i = 0; i < DSAF_SBM_NUM; i++) {
+		o_sbm_cfg = dsaf_read_dev(dsaf_dev,
+					  DSAF_SBM_CFG_REG_0_REG + 0x80 * i);
+		dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_EN_S, 1);
+		dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_SHCUT_EN_S, 0);
+		dsaf_write_dev(dsaf_dev,
+			       DSAF_SBM_CFG_REG_0_REG + 0x80 * i, o_sbm_cfg);
+	}
+}
+
+/**
+ * hns_dsaf_sbm_cfg_mib_en - config sbm
+ * @dsaf_id: dsa fabric id
+ */
+static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev)
+{
+	u32 sbm_cfg_mib_en;
+	u32 i;
+	u32 reg;
+	u32 read_cnt;
+
+	for (i = 0; i < DSAF_SBM_NUM; i++) {
+		reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+		dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 1);
+	}
+
+	/* wait for all sbm enables to finish */
+	for (i = 0; i < DSAF_SBM_NUM; i++) {
+		read_cnt = 0;
+		reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+		do {
+			udelay(1);
+			sbm_cfg_mib_en = dsaf_get_dev_bit(
+					dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S);
+			read_cnt++;
+		} while (sbm_cfg_mib_en == 0 &&
+			read_cnt < DSAF_CFG_READ_CNT);
+
+		if (sbm_cfg_mib_en == 0) {
+			dev_err(dsaf_dev->dev,
+				"sbm_cfg_mib_en fail,%s,sbm_num=%d\n",
+				dsaf_dev->ae_dev.name, i);
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * hns_dsaf_sbm_bp_wl_cfg - config sbm
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
+{
+	u32 o_sbm_bp_cfg0;
+	u32 o_sbm_bp_cfg1;
+	u32 o_sbm_bp_cfg2;
+	u32 o_sbm_bp_cfg3;
+	u32 reg;
+	u32 i;
+
+	/* XGE */
+	for (i = 0; i < DSAF_XGE_NUM; i++) {
+		reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i;
+		o_sbm_bp_cfg0 = dsaf_read_dev(dsaf_dev, reg);
+		dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M,
+			       DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S, 512);
+		dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M,
+			       DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0);
+		dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M,
+			       DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0);
+		dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg0);
+
+		reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i;
+		o_sbm_bp_cfg1 = dsaf_read_dev(dsaf_dev, reg);
+		dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M,
+			       DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0);
+		dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M,
+			       DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0);
+		dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg1);
+
+		reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i;
+		o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg);
+		dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+			       DSAF_SBM_CFG2_SET_BUF_NUM_S, 104);
+		dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+			       DSAF_SBM_CFG2_RESET_BUF_NUM_S, 128);
+		dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2);
+
+		reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i;
+		o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg);
+		dsaf_set_field(o_sbm_bp_cfg3,
+			       DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+			       DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110);
+		dsaf_set_field(o_sbm_bp_cfg3,
+			       DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+			       DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160);
+		dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3);
+
+		/* for no enable pfc mode */
+		reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i;
+		o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg);
+		dsaf_set_field(o_sbm_bp_cfg3,
+			       DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+			       DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 128);
+		dsaf_set_field(o_sbm_bp_cfg3,
+			       DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+			       DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 192);
+		dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3);
+	}
+
+	/* PPE */
+	for (i = 0; i < DSAF_COMM_CHN; i++) {
+		reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
+		o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg);
+		dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+			       DSAF_SBM_CFG2_SET_BUF_NUM_S, 10);
+		dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+			       DSAF_SBM_CFG2_RESET_BUF_NUM_S, 12);
+		dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2);
+	}
+
+	/* RoCEE */
+	for (i = 0; i < DSAF_COMM_CHN; i++) {
+		reg = DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i;
+		o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg);
+		dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+			       DSAF_SBM_CFG2_SET_BUF_NUM_S, 2);
+		dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+			       DSAF_SBM_CFG2_RESET_BUF_NUM_S, 4);
+		dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2);
+	}
+}
+
+/**
+ * hns_dsaf_voq_bp_all_thrd_cfg -  voq
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_voq_bp_all_thrd_cfg(struct dsaf_device *dsaf_dev)
+{
+	u32 voq_bp_all_thrd;
+	u32 i;
+
+	for (i = 0; i < DSAF_VOQ_NUM; i++) {
+		voq_bp_all_thrd = dsaf_read_dev(
+			dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i);
+		if (i < DSAF_XGE_NUM) {
+			dsaf_set_field(voq_bp_all_thrd,
+				       DSAF_VOQ_BP_ALL_DOWNTHRD_M,
+				       DSAF_VOQ_BP_ALL_DOWNTHRD_S, 930);
+			dsaf_set_field(voq_bp_all_thrd,
+				       DSAF_VOQ_BP_ALL_UPTHRD_M,
+				       DSAF_VOQ_BP_ALL_UPTHRD_S, 950);
+		} else {
+			dsaf_set_field(voq_bp_all_thrd,
+				       DSAF_VOQ_BP_ALL_DOWNTHRD_M,
+				       DSAF_VOQ_BP_ALL_DOWNTHRD_S, 220);
+			dsaf_set_field(voq_bp_all_thrd,
+				       DSAF_VOQ_BP_ALL_UPTHRD_M,
+				       DSAF_VOQ_BP_ALL_UPTHRD_S, 230);
+		}
+		dsaf_write_dev(
+			dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i,
+			voq_bp_all_thrd);
+	}
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_cfg - tbl
+ * @dsaf_id: dsa fabric id
+ * @ptbl_tcam_data: addr
+ */
+static void hns_dsaf_tbl_tcam_data_cfg(
+	struct dsaf_device *dsaf_dev,
+	struct dsaf_tbl_tcam_data *ptbl_tcam_data)
+{
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_LOW_0_REG,
+		       ptbl_tcam_data->tbl_tcam_data_low);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_HIGH_0_REG,
+		       ptbl_tcam_data->tbl_tcam_data_high);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_mcast_cfg - tbl
+ * @dsaf_dev: dsa fabric device
+ * @mcast: mcast config to write
+ */
+static void hns_dsaf_tbl_tcam_mcast_cfg(
+	struct dsaf_device *dsaf_dev,
+	struct dsaf_tbl_tcam_mcast_cfg *mcast)
+{
+	u32 mcast_cfg4;
+
+	mcast_cfg4 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+	dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S,
+		     mcast->tbl_mcast_item_vld);
+	dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_OLD_EN_S,
+		     mcast->tbl_mcast_old_en);
+	dsaf_set_field(mcast_cfg4, DSAF_TBL_MCAST_CFG4_VM128_112_M,
+		       DSAF_TBL_MCAST_CFG4_VM128_112_S,
+		       mcast->tbl_mcast_port_msk[4]);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, mcast_cfg4);
+
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG,
+		       mcast->tbl_mcast_port_msk[3]);
+
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG,
+		       mcast->tbl_mcast_port_msk[2]);
+
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG,
+		       mcast->tbl_mcast_port_msk[1]);
+
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG,
+		       mcast->tbl_mcast_port_msk[0]);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_ucast_cfg - tbl
+ * @dsaf_id: dsa fabric id
+ * @ptbl_tcam_ucast: addr
+ */
+static void hns_dsaf_tbl_tcam_ucast_cfg(
+	struct dsaf_device *dsaf_dev,
+	struct dsaf_tbl_tcam_ucast_cfg *tbl_tcam_ucast)
+{
+	u32 ucast_cfg1;
+
+	ucast_cfg1 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+	dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S,
+		     tbl_tcam_ucast->tbl_ucast_mac_discard);
+	dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_ITEM_VLD_S,
+		     tbl_tcam_ucast->tbl_ucast_item_vld);
+	dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OLD_EN_S,
+		     tbl_tcam_ucast->tbl_ucast_old_en);
+	dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_DVC_S,
+		     tbl_tcam_ucast->tbl_ucast_dvc);
+	dsaf_set_field(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OUT_PORT_M,
+		       DSAF_TBL_UCAST_CFG1_OUT_PORT_S,
+		       tbl_tcam_ucast->tbl_ucast_out_port);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG, ucast_cfg1);
+}
+
+/**
+ * hns_dsaf_tbl_line_cfg - tbl
+ * @dsaf_id: dsa fabric id
+ * @ptbl_lin: addr
+ */
+static void hns_dsaf_tbl_line_cfg(struct dsaf_device *dsaf_dev,
+				  struct dsaf_tbl_line_cfg *tbl_lin)
+{
+	u32 tbl_line;
+
+	tbl_line = dsaf_read_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG);
+	dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_MAC_DISCARD_S,
+		     tbl_lin->tbl_line_mac_discard);
+	dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_DVC_S,
+		     tbl_lin->tbl_line_dvc);
+	dsaf_set_field(tbl_line, DSAF_TBL_LINE_CFG_OUT_PORT_M,
+		       DSAF_TBL_LINE_CFG_OUT_PORT_S,
+		       tbl_lin->tbl_line_out_port);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG, tbl_line);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_mcast_pul - tbl
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_tbl_tcam_mcast_pul(struct dsaf_device *dsaf_dev)
+{
+	u32 o_tbl_pul;
+
+	o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+	dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+	dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_line_pul - tbl
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_tbl_line_pul(struct dsaf_device *dsaf_dev)
+{
+	u32 tbl_pul;
+
+	tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+	dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 1);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul);
+	dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 0);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_mcast_pul - tbl
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_tbl_tcam_data_mcast_pul(
+	struct dsaf_device *dsaf_dev)
+{
+	u32 o_tbl_pul;
+
+	o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+	dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1);
+	dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+	dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0);
+	dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_ucast_pul - tbl
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_tbl_tcam_data_ucast_pul(
+	struct dsaf_device *dsaf_dev)
+{
+	u32 o_tbl_pul;
+
+	o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+	dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1);
+	dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 1);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+	dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0);
+	dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 0);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_stat_en - enable tbl statistics
+ * @dsaf_dev: dsa fabric device
+ */
+static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev)
+{
+	u32 o_tbl_ctrl;
+
+	o_tbl_ctrl = dsaf_read_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG);
+	dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_LINE_LKUP_NUM_EN_S, 1);
+	dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_UC_LKUP_NUM_EN_S, 1);
+	dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_MC_LKUP_NUM_EN_S, 1);
+	dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_BC_LKUP_NUM_EN_S, 1);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG, o_tbl_ctrl);
+}
+
+/**
+ * hns_dsaf_rocee_bp_en - rocee back press enable
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev)
+{
+	dsaf_set_dev_bit(dsaf_dev, DSAF_XGE_CTRL_SIG_CFG_0_REG,
+			 DSAF_FC_XGE_TX_PAUSE_S, 1);
+}
+
+/* set msk for dsaf exception irq*/
+static void hns_dsaf_int_xge_msk_set(struct dsaf_device *dsaf_dev,
+				     u32 chnn_num, u32 mask_set)
+{
+	dsaf_write_dev(dsaf_dev,
+		       DSAF_XGE_INT_MSK_0_REG + 0x4 * chnn_num, mask_set);
+}
+
+static void hns_dsaf_int_ppe_msk_set(struct dsaf_device *dsaf_dev,
+				     u32 chnn_num, u32 msk_set)
+{
+	dsaf_write_dev(dsaf_dev,
+		       DSAF_PPE_INT_MSK_0_REG + 0x4 * chnn_num, msk_set);
+}
+
+static void hns_dsaf_int_rocee_msk_set(struct dsaf_device *dsaf_dev,
+				       u32 chnn, u32 msk_set)
+{
+	dsaf_write_dev(dsaf_dev,
+		       DSAF_ROCEE_INT_MSK_0_REG + 0x4 * chnn, msk_set);
+}
+
+static void
+hns_dsaf_int_tbl_msk_set(struct dsaf_device *dsaf_dev, u32 msk_set)
+{
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_MSK_0_REG, msk_set);
+}
+
+/* clr dsaf exception irq*/
+static void hns_dsaf_int_xge_src_clr(struct dsaf_device *dsaf_dev,
+				     u32 chnn_num, u32 int_src)
+{
+	dsaf_write_dev(dsaf_dev,
+		       DSAF_XGE_INT_SRC_0_REG + 0x4 * chnn_num, int_src);
+}
+
+static void hns_dsaf_int_ppe_src_clr(struct dsaf_device *dsaf_dev,
+				     u32 chnn, u32 int_src)
+{
+	dsaf_write_dev(dsaf_dev,
+		       DSAF_PPE_INT_SRC_0_REG + 0x4 * chnn, int_src);
+}
+
+static void hns_dsaf_int_rocee_src_clr(struct dsaf_device *dsaf_dev,
+				       u32 chnn, u32 int_src)
+{
+	dsaf_write_dev(dsaf_dev,
+		       DSAF_ROCEE_INT_SRC_0_REG + 0x4 * chnn, int_src);
+}
+
+static void hns_dsaf_int_tbl_src_clr(struct dsaf_device *dsaf_dev,
+				     u32 int_src)
+{
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_SRC_0_REG, int_src);
+}
+
+/**
+ * hns_dsaf_single_line_tbl_cfg - INT
+ * @dsaf_dev: dsa fabric device
+ * @address: line table address
+ * @ptbl_line: line entry to write
+ */
+static void hns_dsaf_single_line_tbl_cfg(
+	struct dsaf_device *dsaf_dev,
+	u32 address, struct dsaf_tbl_line_cfg *ptbl_line)
+{
+	/*Write Addr*/
+	hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address);
+
+	/*Write Line*/
+	hns_dsaf_tbl_line_cfg(dsaf_dev, ptbl_line);
+
+	/*Write Plus*/
+	hns_dsaf_tbl_line_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_uc_cfg - INT
+ * @dsaf_dev: dsa fabric device
+ * @address: tcam address
+ * @ptbl_tcam_data: tcam data to write
+ * @ptbl_tcam_ucast: ucast entry to write
+ */
+static void hns_dsaf_tcam_uc_cfg(
+	struct dsaf_device *dsaf_dev, u32 address,
+	struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+	struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
+{
+	/*Write Addr*/
+	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+	/*Write Tcam Data*/
+	hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
+	/*Write Tcam Ucast*/
+	hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast);
+	/*Write Plus*/
+	hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg - INT
+ * @dsaf_dev: dsa fabric device
+ * @address: tcam address
+ * @ptbl_tcam_data: tcam data to write
+ * @ptbl_tcam_mcast: mcast entry to write
+ */
+static void hns_dsaf_tcam_mc_cfg(
+	struct dsaf_device *dsaf_dev, u32 address,
+	struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+	struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
+{
+	/*Write Addr*/
+	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+	/*Write Tcam Data*/
+	hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
+	/*Write Tcam Mcast*/
+	hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast);
+	/*Write Plus*/
+	hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_mc_invld - INT
+ * @dsaf_dev: dsa fabric device
+ * @address: tcam address to invalidate
+ */
+static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
+{
+	/*Write Addr*/
+	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+	/*write tcam mcast*/
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG, 0);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG, 0);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG, 0);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG, 0);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, 0);
+
+	/*Write Plus*/
+	hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_uc_get - INT
+ * @dsaf_dev: dsa fabric device
+ * @address: tcam address
+ * @ptbl_tcam_data: tcam data read back
+ * @ptbl_tcam_ucast: ucast entry read back
+ */
+static void hns_dsaf_tcam_uc_get(
+	struct dsaf_device *dsaf_dev, u32 address,
+	struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+	struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
+{
+	u32 tcam_read_data0;
+	u32 tcam_read_data4;
+
+	/*Write Addr*/
+	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+	/*read tcam item puls*/
+	hns_dsaf_tbl_tcam_load_pul(dsaf_dev);
+
+	/*read tcam data*/
+	ptbl_tcam_data->tbl_tcam_data_high
+		= dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+	ptbl_tcam_data->tbl_tcam_data_low
+		= dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+
+	/*read tcam mcast*/
+	tcam_read_data0 = dsaf_read_dev(dsaf_dev,
+					DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+	tcam_read_data4 = dsaf_read_dev(dsaf_dev,
+					DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+
+	ptbl_tcam_ucast->tbl_ucast_item_vld
+		= dsaf_get_bit(tcam_read_data4,
+			       DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
+	ptbl_tcam_ucast->tbl_ucast_old_en
+		= dsaf_get_bit(tcam_read_data4, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
+	ptbl_tcam_ucast->tbl_ucast_mac_discard
+		= dsaf_get_bit(tcam_read_data0,
+			       DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S);
+	ptbl_tcam_ucast->tbl_ucast_out_port
+		= dsaf_get_field(tcam_read_data0,
+				 DSAF_TBL_UCAST_CFG1_OUT_PORT_M,
+				 DSAF_TBL_UCAST_CFG1_OUT_PORT_S);
+	ptbl_tcam_ucast->tbl_ucast_dvc
+		= dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S);
+}
+
+/**
+ * hns_dsaf_tcam_mc_get - INT
+ * @dsaf_dev: dsa fabric device
+ * @address: tcam address
+ * @ptbl_tcam_data: tcam data read back
+ * @ptbl_tcam_mcast: mcast entry read back
+ */
+static void hns_dsaf_tcam_mc_get(
+	struct dsaf_device *dsaf_dev, u32 address,
+	struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+	struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
+{
+	u32 data_tmp;
+
+	/*Write Addr*/
+	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+	/*read tcam item puls*/
+	hns_dsaf_tbl_tcam_load_pul(dsaf_dev);
+
+	/*read tcam data*/
+	ptbl_tcam_data->tbl_tcam_data_high =
+		dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+	ptbl_tcam_data->tbl_tcam_data_low =
+		dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+
+	/*read tcam mcast*/
+	ptbl_tcam_mcast->tbl_mcast_port_msk[0] =
+		dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+	ptbl_tcam_mcast->tbl_mcast_port_msk[1] =
+		dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+	ptbl_tcam_mcast->tbl_mcast_port_msk[2] =
+		dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+	ptbl_tcam_mcast->tbl_mcast_port_msk[3] =
+		dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+
+	data_tmp = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+	ptbl_tcam_mcast->tbl_mcast_item_vld =
+		dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
+	ptbl_tcam_mcast->tbl_mcast_old_en =
+		dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
+	ptbl_tcam_mcast->tbl_mcast_port_msk[4] =
+		dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M,
+			       DSAF_TBL_MCAST_CFG4_VM128_112_S);
+}
+
+/**
+ * hns_dsaf_tbl_line_init - INT
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_tbl_line_init(struct dsaf_device *dsaf_dev)
+{
+	u32 i;
+	/* by default, set all linear mac table entries to discard */
+	struct dsaf_tbl_line_cfg tbl_line[] = {{1, 0, 0} };
+
+	for (i = 0; i < DSAF_LINE_SUM; i++)
+		hns_dsaf_single_line_tbl_cfg(dsaf_dev, i, tbl_line);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_init - INT
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
+{
+	u32 i;
+	struct dsaf_tbl_tcam_data tcam_data[] = {{0, 0} };
+	struct dsaf_tbl_tcam_ucast_cfg tcam_ucast[] = {{0, 0, 0, 0, 0} };
+
+	/*tcam tbl*/
+	for (i = 0; i < DSAF_TCAM_SUM; i++)
+		hns_dsaf_tcam_uc_cfg(dsaf_dev, i, tcam_data, tcam_ucast);
+}
+
+/**
+ * hns_dsaf_pfc_en_cfg - dsaf pfc pause cfg
+ * @dsaf_dev: dsa fabric device
+ * @mac_id: mac id
+ * @en: enable or not
+ */
+static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
+				int mac_id, int en)
+{
+	if (!en)
+		dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0);
+	else
+		dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0xff);
+}
+
+/**
+ * hns_dsaf_comm_init - common init
+ * @dsaf_dev: dsa fabric device
+ */
+static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
+{
+	u32 i;
+	u32 o_dsaf_cfg;
+
+	o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG);
+	dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en);
+	dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_TC_MODE_S, dsaf_dev->dsaf_tc_mode);
+	dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_CRC_EN_S, 0);
+	dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_MIX_MODE_S, 0);
+	dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_LOCA_ADDR_EN_S, 0);
+	dsaf_write_dev(dsaf_dev, DSAF_CFG_0_REG, o_dsaf_cfg);
+
+	hns_dsaf_reg_cnt_clr_ce(dsaf_dev, 1);
+	hns_dsaf_stp_port_type_cfg(dsaf_dev, DSAF_STP_PORT_TYPE_FORWARD);
+
+	/* set 22 queue per tx ppe engine, only used in switch mode */
+	hns_dsaf_ppe_qid_cfg(dsaf_dev, DSAF_DEFAUTL_QUEUE_NUM_PER_PPE);
+
+	/* in non switch mode, set all port to access mode */
+	hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
+
+	/* set dsaf pfc to 0 for parsing rx pause */
+	for (i = 0; i < DSAF_COMM_CHN; i++)
+		hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0);
+
+	/* mask and clear exception irqs */
+	for (i = 0; i < DSAF_COMM_CHN; i++) {
+		hns_dsaf_int_xge_src_clr(dsaf_dev, i, 0xfffffffful);
+		hns_dsaf_int_ppe_src_clr(dsaf_dev, i, 0xfffffffful);
+		hns_dsaf_int_rocee_src_clr(dsaf_dev, i, 0xfffffffful);
+
+		hns_dsaf_int_xge_msk_set(dsaf_dev, i, 0xfffffffful);
+		hns_dsaf_int_ppe_msk_set(dsaf_dev, i, 0xfffffffful);
+		hns_dsaf_int_rocee_msk_set(dsaf_dev, i, 0xfffffffful);
+	}
+	hns_dsaf_int_tbl_src_clr(dsaf_dev, 0xfffffffful);
+	hns_dsaf_int_tbl_msk_set(dsaf_dev, 0xfffffffful);
+}
+
+/**
+ * hns_dsaf_inode_init - INT
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev)
+{
+	u32 reg;
+	u32 tc_cfg;
+	u32 i;
+
+	if (dsaf_dev->dsaf_tc_mode == HRD_DSAF_4TC_MODE)
+		tc_cfg = HNS_DSAF_I4TC_CFG;
+	else
+		tc_cfg = HNS_DSAF_I8TC_CFG;
+
+	for (i = 0; i < DSAF_INODE_NUM; i++) {
+		reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i;
+		dsaf_set_dev_field(dsaf_dev, reg, DSAF_INODE_IN_PORT_NUM_M,
+				   DSAF_INODE_IN_PORT_NUM_S, i % DSAF_XGE_NUM);
+
+		reg = DSAF_INODE_PRI_TC_CFG_0_REG + 0x80 * i;
+		dsaf_write_dev(dsaf_dev, reg, tc_cfg);
+	}
+}
+
+/**
+ * hns_dsaf_sbm_init - INT
+ * @dsaf_id: dsa fabric id
+ */
+static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev)
+{
+	u32 flag;
+	u32 cnt = 0;
+	int ret;
+
+	hns_dsaf_sbm_bp_wl_cfg(dsaf_dev);
+
+	/* enable sbm channel, disable sbm channel shortcut function */
+	hns_dsaf_sbm_cfg(dsaf_dev);
+
+	/* enable sbm mib */
+	ret = hns_dsaf_sbm_cfg_mib_en(dsaf_dev);
+	if (ret) {
+		dev_err(dsaf_dev->dev,
+			"hns_dsaf_sbm_cfg_mib_en fail,%s, ret=%d\n",
+			dsaf_dev->ae_dev.name, ret);
+		return ret;
+	}
+
+	/* enable sbm initial link sram */
+	hns_dsaf_sbm_link_sram_init_en(dsaf_dev);
+
+	do {
+		usleep_range(200, 210);/*udelay(200);*/
+		flag = dsaf_read_dev(dsaf_dev, DSAF_SRAM_INIT_OVER_0_REG);
+		cnt++;
+	} while (flag != DSAF_SRAM_INIT_FINISH_FLAG && cnt < DSAF_CFG_READ_CNT);
+
+	if (flag != DSAF_SRAM_INIT_FINISH_FLAG) {
+		dev_err(dsaf_dev->dev,
+			"hns_dsaf_sbm_init fail %s, flag=%d, cnt=%d\n",
+			dsaf_dev->ae_dev.name, flag, cnt);
+		return -ENODEV;
+	}
+
+	hns_dsaf_rocee_bp_en(dsaf_dev);
+
+	return 0;
+}
+
+/**
+ * hns_dsaf_tbl_init - INT
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_tbl_init(struct dsaf_device *dsaf_dev)
+{
+	hns_dsaf_tbl_stat_en(dsaf_dev);
+
+	hns_dsaf_tbl_tcam_init(dsaf_dev);
+	hns_dsaf_tbl_line_init(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_voq_init - INT
+ * @dsaf_id: dsa fabric id
+ */
+static void hns_dsaf_voq_init(struct dsaf_device *dsaf_dev)
+{
+	hns_dsaf_voq_bp_all_thrd_cfg(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_init_hw - init dsa fabric hardware
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
+{
+	int ret;
+
+	dev_dbg(dsaf_dev->dev,
+		"hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name);
+
+	hns_dsaf_rst(dsaf_dev, 0);
+	mdelay(10);
+	hns_dsaf_rst(dsaf_dev, 1);
+
+	hns_dsaf_comm_init(dsaf_dev);
+
+	/*init XBAR_INODE*/
+	hns_dsaf_inode_init(dsaf_dev);
+
+	/*init SBM*/
+	ret = hns_dsaf_sbm_init(dsaf_dev);
+	if (ret)
+		return ret;
+
+	/*init TBL*/
+	hns_dsaf_tbl_init(dsaf_dev);
+
+	/*init VOQ*/
+	hns_dsaf_voq_init(dsaf_dev);
+
+	return 0;
+}
+
+/**
+ * hns_dsaf_remove_hw - uninit dsa fabric hardware
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev)
+{
+	/*reset*/
+	hns_dsaf_rst(dsaf_dev, 0);
+}
+
+/**
+ * hns_dsaf_init - init dsa fabric
+ * @dsaf_dev: dsa fabric device struct pointer
+ * return 0 - success, negative - fail
+ */
+static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
+{
+	struct dsaf_drv_priv *priv =
+	    (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+	u32 i;
+	int ret;
+
+	ret = hns_dsaf_init_hw(dsaf_dev);
+	if (ret)
+		return ret;
+
+	/* malloc mem for tcam mac key(vlan+mac) */
+	priv->soft_mac_tbl = vzalloc(sizeof(*priv->soft_mac_tbl)
+		  * DSAF_TCAM_SUM);
+	if (!priv->soft_mac_tbl) {
+		ret = -ENOMEM;
+		goto remove_hw;
+	}
+
+	/* invalidate all entries */
+	for (i = 0; i < DSAF_TCAM_SUM; i++)
+		(priv->soft_mac_tbl + i)->index = DSAF_INVALID_ENTRY_IDX;
+
+	return 0;
+
+remove_hw:
+	hns_dsaf_remove_hw(dsaf_dev);
+	return ret;
+}
+
+/**
+ * hns_dsaf_free - free dsa fabric
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_free(struct dsaf_device *dsaf_dev)
+{
+	struct dsaf_drv_priv *priv =
+	    (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+
+	hns_dsaf_remove_hw(dsaf_dev);
+
+	/* free all mac mem */
+	vfree(priv->soft_mac_tbl);
+	priv->soft_mac_tbl = NULL;
+}
+
+/**
+ * hns_dsaf_find_soft_mac_entry - find dsa fabric soft entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_key: mac entry struct pointer
+ */
+static u16 hns_dsaf_find_soft_mac_entry(
+	struct dsaf_device *dsaf_dev,
+	struct dsaf_drv_tbl_tcam_key *mac_key)
+{
+	struct dsaf_drv_priv *priv =
+	    (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+	struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+	u32 i;
+
+	soft_mac_entry = priv->soft_mac_tbl;
+	for (i = 0; i < DSAF_TCAM_SUM; i++) {
+		/* valid entry with matching key */
+		if ((soft_mac_entry->index != DSAF_INVALID_ENTRY_IDX) &&
+		    (soft_mac_entry->tcam_key.high.val == mac_key->high.val) &&
+		    (soft_mac_entry->tcam_key.low.val == mac_key->low.val))
+			/* return find result --soft index */
+			return soft_mac_entry->index;
+
+		soft_mac_entry++;
+	}
+	return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
+ * hns_dsaf_find_empty_mac_entry - search dsa fabric soft empty-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
+{
+	struct dsaf_drv_priv *priv =
+	    (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+	struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+	u32 i;
+
+	soft_mac_entry = priv->soft_mac_tbl;
+	for (i = 0; i < DSAF_TCAM_SUM; i++) {
+		/* first invalid (empty) entry */
+		if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+			/* return find result --soft index */
+			return i;
+
+		soft_mac_entry++;
+	}
+	return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
+ * hns_dsaf_set_mac_key - set mac key
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_key: tcam key pointer
+ * @vlan_id: vlan id
+ * @in_port_num: input port num
+ * @addr: mac addr
+ */
+static void hns_dsaf_set_mac_key(
+	struct dsaf_device *dsaf_dev,
+	struct dsaf_drv_tbl_tcam_key *mac_key, u16 vlan_id, u8 in_port_num,
+	u8 *addr)
+{
+	u8 port;
+
+	if (dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE)
+		/*DSAF mode : in port id fixed 0*/
+		port = 0;
+	else
+		/*non-dsaf mode*/
+		port = in_port_num;
+
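+	/* key layout (struct dsaf_drv_tbl_tcam_key): the high word holds
+	 * mac[0..3], the low word holds mac[4..5], the vlan id and the
+	 * input port
+	 */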
+	mac_key->high.bits.mac_0 = addr[0];
+	mac_key->high.bits.mac_1 = addr[1];
+	mac_key->high.bits.mac_2 = addr[2];
+	mac_key->high.bits.mac_3 = addr[3];
+	mac_key->low.bits.mac_4 = addr[4];
+	mac_key->low.bits.mac_5 = addr[5];
+	mac_key->low.bits.vlan = vlan_id;
+	mac_key->low.bits.port = port;
+}
+
+/**
+ * hns_dsaf_set_mac_uc_entry - set mac uc-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: uc-mac entry
+ */
+int hns_dsaf_set_mac_uc_entry(
+	struct dsaf_device *dsaf_dev,
+	struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+	struct dsaf_drv_tbl_tcam_key mac_key;
+	struct dsaf_tbl_tcam_ucast_cfg mac_data;
+	struct dsaf_drv_priv *priv =
+	    (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+	struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+
+	/* mac addr check */
+	if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+	    MAC_IS_BROADCAST(mac_entry->addr) ||
+	    MAC_IS_MULTICAST(mac_entry->addr)) {
+		dev_err(dsaf_dev->dev,
+			"set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
+			dsaf_dev->ae_dev.name, mac_entry->addr[0],
+			mac_entry->addr[1], mac_entry->addr[2],
+			mac_entry->addr[3], mac_entry->addr[4],
+			mac_entry->addr[5]);
+		return -EINVAL;
+	}
+
+	/* config key */
+	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+			     mac_entry->in_port_num, mac_entry->addr);
+
+	/* does the entry already exist? */
+	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		/* if it does not exist, find an empty entry */
+		entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+		if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+			/* no empty entry, return error */
+			dev_err(dsaf_dev->dev,
+				"set_uc_entry failed, %s Mac key(%#x:%#x)\n",
+				dsaf_dev->ae_dev.name,
+				mac_key.high.val, mac_key.low.val);
+			return -EINVAL;
+		}
+	}
+
+	dev_dbg(dsaf_dev->dev,
+		"set_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+		dsaf_dev->ae_dev.name, mac_key.high.val,
+		mac_key.low.val, entry_index);
+
+	/* config hardware entry */
+	mac_data.tbl_ucast_item_vld = 1;
+	mac_data.tbl_ucast_mac_discard = 0;
+	mac_data.tbl_ucast_old_en = 0;
+	/* default config dvc to 0 */
+	mac_data.tbl_ucast_dvc = 0;
+	mac_data.tbl_ucast_out_port = mac_entry->port_num;
+	hns_dsaf_tcam_uc_cfg(
+		dsaf_dev, entry_index,
+		(struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+
+	/* config software entry */
+	soft_mac_entry += entry_index;
+	soft_mac_entry->index = entry_index;
+	soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+	soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+	return 0;
+}
+
+/**
+ * hns_dsaf_set_mac_mc_entry - set mac mc-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mc-mac entry
+ */
+int hns_dsaf_set_mac_mc_entry(
+	struct dsaf_device *dsaf_dev,
+	struct dsaf_drv_mac_multi_dest_entry *mac_entry)
+{
+	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+	struct dsaf_drv_tbl_tcam_key mac_key;
+	struct dsaf_tbl_tcam_mcast_cfg mac_data;
+	struct dsaf_drv_priv *priv =
+	    (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+	struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+	struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+
+	/* mac addr check */
+	if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+		dev_err(dsaf_dev->dev,
+			"set uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
+			dsaf_dev->ae_dev.name, mac_entry->addr[0],
+			mac_entry->addr[1], mac_entry->addr[2],
+			mac_entry->addr[3],
+			mac_entry->addr[4], mac_entry->addr[5]);
+		return -EINVAL;
+	}
+
+	/*config key */
+	hns_dsaf_set_mac_key(dsaf_dev, &mac_key,
+			     mac_entry->in_vlan_id,
+			     mac_entry->in_port_num, mac_entry->addr);
+
+	/* does the entry already exist? */
+	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		/* if not, find an empty entry */
+		entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+		if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+			/* no empty entry, return error */
+			dev_err(dsaf_dev->dev,
+				"set_uc_entry failed, %s Mac key(%#x:%#x)\n",
+				dsaf_dev->ae_dev.name,
+				mac_key.high.val, mac_key.low.val);
+			return -EINVAL;
+		}
+
+		/* config hardware entry */
+		memset(mac_data.tbl_mcast_port_msk,
+		       0, sizeof(mac_data.tbl_mcast_port_msk));
+	} else {
+		/* config hardware entry */
+		hns_dsaf_tcam_mc_get(
+			dsaf_dev, entry_index,
+			(struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+	}
+	mac_data.tbl_mcast_old_en = 0;
+	mac_data.tbl_mcast_item_vld = 1;
+	dsaf_set_field(mac_data.tbl_mcast_port_msk[0],
+		       0x3F, 0, mac_entry->port_mask[0]);
+
+	dev_dbg(dsaf_dev->dev,
+		"set_uc_entry, %s key(%#x:%#x) entry_index%d\n",
+		dsaf_dev->ae_dev.name, mac_key.high.val,
+		mac_key.low.val, entry_index);
+
+	hns_dsaf_tcam_mc_cfg(
+		dsaf_dev, entry_index,
+		(struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+
+	/* config software entry */
+	soft_mac_entry += entry_index;
+	soft_mac_entry->index = entry_index;
+	soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+	soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+	return 0;
+}
+
+/**
+ * hns_dsaf_add_mac_mc_port - add mac mc-port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mc-mac entry
+ */
+int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
+			     struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+	struct dsaf_drv_tbl_tcam_key mac_key;
+	struct dsaf_tbl_tcam_mcast_cfg mac_data;
+	struct dsaf_drv_priv *priv =
+	    (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+	struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+	struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+	int mskid;
+
+	/* check mac addr */
+	if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+		dev_err(dsaf_dev->dev,
+			"set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
+			mac_entry->addr[0], mac_entry->addr[1],
+			mac_entry->addr[2], mac_entry->addr[3],
+			mac_entry->addr[4], mac_entry->addr[5]);
+		return -EINVAL;
+	}
+
+	/*config key */
+	hns_dsaf_set_mac_key(
+		dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+		mac_entry->in_port_num, mac_entry->addr);
+
+	memset(&mac_data, 0, sizeof(struct dsaf_tbl_tcam_mcast_cfg));
+
+	/*check exist? */
+	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		/* if not, find an empty entry */
+		entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+		if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+			/* no empty entry, return error */
+			dev_err(dsaf_dev->dev,
+				"set_uc_entry failed, %s Mac key(%#x:%#x)\n",
+				dsaf_dev->ae_dev.name, mac_key.high.val,
+				mac_key.low.val);
+			return -EINVAL;
+		}
+	} else {
+		/* if it exists, read it and add the new port */
+		hns_dsaf_tcam_mc_get(
+			dsaf_dev, entry_index,
+			(struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+	}
+	/* config hardware entry */
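+	/* map the output port to its bit in the mcast port mask: service
+	 * network ports use bits [0, DSAF_SERVICE_NW_NUM), inner ports
+	 * follow after them
+	 */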
+	if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
+		mskid = mac_entry->port_num;
+	} else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) {
+		mskid = mac_entry->port_num -
+			DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+	} else {
+		dev_err(dsaf_dev->dev,
+			"%s,pnum(%d)error,key(%#x:%#x)\n",
+			dsaf_dev->ae_dev.name, mac_entry->port_num,
+			mac_key.high.val, mac_key.low.val);
+		return -EINVAL;
+	}
+	dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 1);
+	mac_data.tbl_mcast_old_en = 0;
+	mac_data.tbl_mcast_item_vld = 1;
+
+	dev_dbg(dsaf_dev->dev,
+		"set_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+		dsaf_dev->ae_dev.name, mac_key.high.val,
+		mac_key.low.val, entry_index);
+
+	hns_dsaf_tcam_mc_cfg(
+		dsaf_dev, entry_index,
+		(struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+
+	/*config software entry */
+	soft_mac_entry += entry_index;
+	soft_mac_entry->index = entry_index;
+	soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+	soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+	return 0;
+}
+
+/**
+ * hns_dsaf_del_mac_entry - del mac entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @vlan_id: vlan id
+ * @in_port_num: input port num
+ * @addr : mac addr
+ */
+int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
+			   u8 in_port_num, u8 *addr)
+{
+	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+	struct dsaf_drv_tbl_tcam_key mac_key;
+	struct dsaf_drv_priv *priv =
+	    (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+	struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+
+	/*check mac addr */
+	if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) {
+		dev_err(dsaf_dev->dev,
+			"del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
+			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+		return -EINVAL;
+	}
+
+	/*config key */
+	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num, addr);
+
+	/*exist ?*/
+	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		/*not exist, error */
+		dev_err(dsaf_dev->dev,
+			"del_mac_entry failed, %s Mac key(%#x:%#x)\n",
+			dsaf_dev->ae_dev.name,
+			mac_key.high.val, mac_key.low.val);
+		return -EINVAL;
+	}
+	dev_dbg(dsaf_dev->dev,
+		"del_mac_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+		dsaf_dev->ae_dev.name, mac_key.high.val,
+		mac_key.low.val, entry_index);
+
+	/* do delete operation */
+	hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index);
+
+	/* del soft entry */
+	soft_mac_entry += entry_index;
+	soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+
+	return 0;
+}
+
+/**
+ * hns_dsaf_del_mac_mc_port - del mac mc-port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
+			     struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+	struct dsaf_drv_tbl_tcam_key mac_key;
+	struct dsaf_drv_priv *priv =
+	    (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+	struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+	u16 vlan_id;
+	u8 in_port_num;
+	struct dsaf_tbl_tcam_mcast_cfg mac_data;
+	struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+	int mskid;
+	const u8 empty_msk[sizeof(mac_data.tbl_mcast_port_msk)] = {0};
+
+	if (!mac_entry) {
+		dev_err(dsaf_dev->dev,
+			"hns_dsaf_del_mac_mc_port mac_entry is NULL\n");
+		return -EINVAL;
+	}
+
+	/*get key info*/
+	vlan_id = mac_entry->in_vlan_id;
+	in_port_num = mac_entry->in_port_num;
+
+	/*check mac addr */
+	if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+		dev_err(dsaf_dev->dev,
+			"del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
+			mac_entry->addr[0], mac_entry->addr[1],
+			mac_entry->addr[2], mac_entry->addr[3],
+			mac_entry->addr[4], mac_entry->addr[5]);
+		return -EINVAL;
+	}
+
+	/*config key */
+	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num,
+			     mac_entry->addr);
+
+	/*check is exist? */
+	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		/*find none */
+		dev_err(dsaf_dev->dev,
+			"find_soft_mac_entry failed, %s Mac key(%#x:%#x)\n",
+			dsaf_dev->ae_dev.name,
+			mac_key.high.val, mac_key.low.val);
+		return -EINVAL;
+	}
+
+	dev_dbg(dsaf_dev->dev,
+		"del_mac_mc_port, %s key(%#x:%#x) index%d\n",
+		dsaf_dev->ae_dev.name, mac_key.high.val,
+		mac_key.low.val, entry_index);
+
+	/*read entry*/
+	hns_dsaf_tcam_mc_get(
+		dsaf_dev, entry_index,
+		(struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+
+	/*del the port*/
+	if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
+		mskid = mac_entry->port_num;
+	} else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) {
+		mskid = mac_entry->port_num -
+			DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+	} else {
+		dev_err(dsaf_dev->dev,
+			"%s,pnum(%d)error,key(%#x:%#x)\n",
+			dsaf_dev->ae_dev.name, mac_entry->port_num,
+			mac_key.high.val, mac_key.low.val);
+		return -EINVAL;
+	}
+	dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 0);
+
+	/* if no ports remain, delete the entry */
+	if (!memcmp(mac_data.tbl_mcast_port_msk, empty_msk,
+		    sizeof(mac_data.tbl_mcast_port_msk))) {
+		hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index);
+
+		/* del soft entry */
+		soft_mac_entry += entry_index;
+		soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+	} else { /* not zero, just del the port and update the entry */
+		hns_dsaf_tcam_mc_cfg(
+			dsaf_dev, entry_index,
+			(struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+	}
+
+	return 0;
+}
+
+/**
+ * hns_dsaf_get_mac_uc_entry - get mac uc entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
+			      struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+	struct dsaf_drv_tbl_tcam_key mac_key;
+
+	struct dsaf_tbl_tcam_ucast_cfg mac_data;
+
+	/* check macaddr */
+	if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+	    MAC_IS_BROADCAST(mac_entry->addr)) {
+		dev_err(dsaf_dev->dev,
+			"get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+			mac_entry->addr[0], mac_entry->addr[1],
+			mac_entry->addr[2], mac_entry->addr[3],
+			mac_entry->addr[4], mac_entry->addr[5]);
+		return -EINVAL;
+	}
+
+	/*config key */
+	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+			     mac_entry->in_port_num, mac_entry->addr);
+
+	/*check exist? */
+	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		/*find none, error */
+		dev_err(dsaf_dev->dev,
+			"get_uc_entry failed, %s Mac key(%#x:%#x)\n",
+			dsaf_dev->ae_dev.name,
+			mac_key.high.val, mac_key.low.val);
+		return -EINVAL;
+	}
+	dev_dbg(dsaf_dev->dev,
+		"get_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+		dsaf_dev->ae_dev.name, mac_key.high.val,
+		mac_key.low.val, entry_index);
+
+	/*read entry*/
+	hns_dsaf_tcam_uc_get(dsaf_dev, entry_index,
+			     (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+	mac_entry->port_num = mac_data.tbl_ucast_out_port;
+
+	return 0;
+}
+
+/**
+ * hns_dsaf_get_mac_mc_entry - get mac mc entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
+			      struct dsaf_drv_mac_multi_dest_entry *mac_entry)
+{
+	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+	struct dsaf_drv_tbl_tcam_key mac_key;
+
+	struct dsaf_tbl_tcam_mcast_cfg mac_data;
+
+	/*check mac addr */
+	if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+	    MAC_IS_BROADCAST(mac_entry->addr)) {
+		dev_err(dsaf_dev->dev,
+			"get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+			mac_entry->addr[0], mac_entry->addr[1],
+			mac_entry->addr[2], mac_entry->addr[3],
+			mac_entry->addr[4], mac_entry->addr[5]);
+		return -EINVAL;
+	}
+
+	/*config key */
+	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+			     mac_entry->in_port_num, mac_entry->addr);
+
+	/*check exist? */
+	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		/* find none, error */
+		dev_err(dsaf_dev->dev,
+			"get_mac_uc_entry failed, %s Mac key(%#x:%#x)\n",
+			dsaf_dev->ae_dev.name, mac_key.high.val,
+			mac_key.low.val);
+		return -EINVAL;
+	}
+	dev_dbg(dsaf_dev->dev,
+		"get_mac_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+		dsaf_dev->ae_dev.name, mac_key.high.val,
+		mac_key.low.val, entry_index);
+
+	/*read entry */
+	hns_dsaf_tcam_mc_get(dsaf_dev, entry_index,
+			     (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+
+	mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
+	return 0;
+}
+
+/**
+ * hns_dsaf_get_mac_entry_by_index - get mac entry by tab index
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @entry_index: tab entry index
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_get_mac_entry_by_index(
+	struct dsaf_device *dsaf_dev,
+	u16 entry_index, struct dsaf_drv_mac_multi_dest_entry *mac_entry)
+{
+	struct dsaf_drv_tbl_tcam_key mac_key;
+
+	struct dsaf_tbl_tcam_mcast_cfg mac_data;
+	struct dsaf_tbl_tcam_ucast_cfg mac_uc_data;
+	char mac_addr[MAC_NUM_OCTETS_PER_ADDR] = {0};
+
+	if (entry_index >= DSAF_TCAM_SUM) {
+		/* index out of range, error */
+		dev_err(dsaf_dev->dev, "get_uc_entry failed, %s\n",
+			dsaf_dev->ae_dev.name);
+		return -EINVAL;
+	}
+
+	/* mc entry, do read opt */
+	hns_dsaf_tcam_mc_get(dsaf_dev, entry_index,
+			     (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+
+	mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
+
+	/* get mac addr */
+	mac_addr[0] = mac_key.high.bits.mac_0;
+	mac_addr[1] = mac_key.high.bits.mac_1;
+	mac_addr[2] = mac_key.high.bits.mac_2;
+	mac_addr[3] = mac_key.high.bits.mac_3;
+	mac_addr[4] = mac_key.low.bits.mac_4;
+	mac_addr[5] = mac_key.low.bits.mac_5;
+	/* is it mc or uc? */
+	if (MAC_IS_MULTICAST((u8 *)mac_addr) ||
+	    MAC_IS_L3_MULTICAST((u8 *)mac_addr)) {
+		/* mc entry, nothing more to do */
+	} else {
+		/*is not mc, just uc... */
+		hns_dsaf_tcam_uc_get(dsaf_dev, entry_index,
+				     (struct dsaf_tbl_tcam_data *)&mac_key,
+				     &mac_uc_data);
+		mac_entry->port_mask[0] = (1 << mac_uc_data.tbl_ucast_out_port);
+	}
+
+	return 0;
+}
+
+static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev,
+					      size_t sizeof_priv)
+{
+	struct dsaf_device *dsaf_dev;
+
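+	/* the driver private area is allocated right after struct
+	 * dsaf_device, as returned by hns_dsaf_dev_priv()
+	 */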
+	dsaf_dev = devm_kzalloc(dev,
+				sizeof(*dsaf_dev) + sizeof_priv, GFP_KERNEL);
+	if (unlikely(!dsaf_dev)) {
+		dsaf_dev = ERR_PTR(-ENOMEM);
+	} else {
+		dsaf_dev->dev = dev;
+		dev_set_drvdata(dev, dsaf_dev);
+	}
+
+	return dsaf_dev;
+}
+
+/**
+ * hns_dsaf_free_dev - free dev mem
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_free_dev(struct dsaf_device *dsaf_dev)
+{
+	(void)dev_set_drvdata(dsaf_dev->dev, NULL);
+}
+
+/**
+ * hns_dsaf_pfc_unit_cnt - set pfc unit count
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac id
+ * @rate: port rate mode
+ */
+static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int  mac_id,
+				  enum dsaf_port_rate_mode rate)
+{
+	u32 unit_cnt;
+
+	switch (rate) {
+	case DSAF_PORT_RATE_10000:
+		unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
+		break;
+	case DSAF_PORT_RATE_1000:
+		unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
+		break;
+	case DSAF_PORT_RATE_2500:
+		unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
+		break;
+	default:
+		unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
+	}
+
+	dsaf_set_dev_field(dsaf_dev,
+			   (DSAF_PFC_UNIT_CNT_0_REG + 0x4 * (u64)mac_id),
+			   DSAF_PFC_UNINT_CNT_M, DSAF_PFC_UNINT_CNT_S,
+			   unit_cnt);
+}
+
+/**
+ * hns_dsaf_port_work_rate_cfg - set port work mode by rate
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac id
+ * @rate_mode: port rate mode
+ */
+void hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
+				 enum dsaf_port_rate_mode rate_mode)
+{
+	u32 port_work_mode;
+
+	port_work_mode = dsaf_read_dev(
+		dsaf_dev, DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id);
+
+	if (rate_mode == DSAF_PORT_RATE_10000)
+		dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 1);
+	else
+		dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 0);
+
+	dsaf_write_dev(dsaf_dev,
+		       DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id,
+		       port_work_mode);
+
+	hns_dsaf_pfc_unit_cnt(dsaf_dev, mac_id, rate_mode);
+}
+
+/**
+ * hns_dsaf_fix_mac_mode - dsaf modify mac mode
+ * @mac_cb: mac control block
+ */
+void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb)
+{
+	enum dsaf_port_rate_mode mode;
+	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+	int mac_id = mac_cb->mac_id;
+
+	if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+		return;
+	if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII)
+		mode = DSAF_PORT_RATE_10000;
+	else
+		mode = DSAF_PORT_RATE_1000;
+
+	hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode);
+}
+
+void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
+{
+	struct dsaf_hw_stats *hw_stats
+		= &dsaf_dev->hw_stats[node_num];
+
+	hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
+		DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+	hw_stats->man_pkts += dsaf_read_dev(dsaf_dev,
+		DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + 0x80 * (u64)node_num);
+	hw_stats->rx_pkts += dsaf_read_dev(dsaf_dev,
+		DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num);
+	hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev,
+		DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num);
+	hw_stats->rx_pause_frame += dsaf_read_dev(dsaf_dev,
+		DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + 0x80 * (u64)node_num);
+	hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev,
+		DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num);
+	hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev,
+		DSAF_INODE_SBM_DROP_NUM_0_REG + 0x80 * (u64)node_num);
+	hw_stats->crc_false += dsaf_read_dev(dsaf_dev,
+		DSAF_INODE_CRC_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
+	hw_stats->bp_drop += dsaf_read_dev(dsaf_dev,
+		DSAF_INODE_BP_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+	hw_stats->rslt_drop += dsaf_read_dev(dsaf_dev,
+		DSAF_INODE_RSLT_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+	hw_stats->local_addr_false += dsaf_read_dev(dsaf_dev,
+		DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
+
+	hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
+		DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num);
+	hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
+		DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
+
+	hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev,
+		DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num);
+}
+
+/**
+ *hns_dsaf_get_regs - dump dsaf regs
+ *@ddev: dsaf device
+ *@port: port index
+ *@data: buffer for register values
+ */
+void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
+{
+	u32 i = 0;
+	u32 j;
+	u32 *p = data;
+
+	/* dsaf common registers */
+	p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG);
+	p[1] = dsaf_read_dev(ddev, DSAF_CFG_0_REG);
+	p[2] = dsaf_read_dev(ddev, DSAF_ECC_ERR_INVERT_0_REG);
+	p[3] = dsaf_read_dev(ddev, DSAF_ABNORMAL_TIMEOUT_0_REG);
+	p[4] = dsaf_read_dev(ddev, DSAF_FSM_TIMEOUT_0_REG);
+	p[5] = dsaf_read_dev(ddev, DSAF_DSA_REG_CNT_CLR_CE_REG);
+	p[6] = dsaf_read_dev(ddev, DSAF_DSA_SBM_INF_FIFO_THRD_REG);
+	p[7] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_SEL_REG);
+	p[8] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_CNT_REG);
+
+	p[9] = dsaf_read_dev(ddev, DSAF_PFC_EN_0_REG + port * 4);
+	p[10] = dsaf_read_dev(ddev, DSAF_PFC_UNIT_CNT_0_REG + port * 4);
+	p[11] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
+	p[12] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
+	p[13] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
+	p[14] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
+	p[15] = dsaf_read_dev(ddev, DSAF_PPE_INT_MSK_0_REG + port * 4);
+	p[16] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_MSK_0_REG + port * 4);
+	p[17] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
+	p[18] = dsaf_read_dev(ddev, DSAF_PPE_INT_SRC_0_REG + port * 4);
+	p[19] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_SRC_0_REG + port * 4);
+	p[20] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
+	p[21] = dsaf_read_dev(ddev, DSAF_PPE_INT_STS_0_REG + port * 4);
+	p[22] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_STS_0_REG + port * 4);
+	p[23] = dsaf_read_dev(ddev, DSAF_PPE_QID_CFG_0_REG + port * 4);
+
+	for (i = 0; i < DSAF_SW_PORT_NUM; i++)
+		p[24 + i] = dsaf_read_dev(ddev,
+				DSAF_SW_PORT_TYPE_0_REG + i * 4);
+
+	p[32] = dsaf_read_dev(ddev, DSAF_MIX_DEF_QID_0_REG + port * 4);
+
+	for (i = 0; i < DSAF_SW_PORT_NUM; i++)
+		p[33 + i] = dsaf_read_dev(ddev,
+				DSAF_PORT_DEF_VLAN_0_REG + i * 4);
+
+	for (i = 0; i < DSAF_TOTAL_QUEUE_NUM; i++)
+		p[41 + i] = dsaf_read_dev(ddev,
+				DSAF_VM_DEF_VLAN_0_REG + i * 4);
+
+	/* dsaf inode registers */
+	p[170] = dsaf_read_dev(ddev, DSAF_INODE_CUT_THROUGH_CFG_0_REG);
+
+	p[171] = dsaf_read_dev(ddev,
+			DSAF_INODE_ECC_ERR_ADDR_0_REG + port * 0x80);
+
+	for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
+		j = i * DSAF_COMM_CHN + port;
+		p[172 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_IN_PORT_NUM_0_REG + j * 0x80);
+		p[175 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_PRI_TC_CFG_0_REG + j * 0x80);
+		p[178 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_BP_STATUS_0_REG + j * 0x80);
+		p[181 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_PAD_DISCARD_NUM_0_REG + j * 0x80);
+		p[184 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + j * 0x80);
+		p[187 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80);
+		p[190 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80);
+		p[193 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + j * 0x80);
+		p[196 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80);
+		p[199 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_SBM_DROP_NUM_0_REG + j * 0x80);
+		p[202 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_CRC_FALSE_NUM_0_REG + j * 0x80);
+		p[205 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_BP_DISCARD_NUM_0_REG + j * 0x80);
+		p[208 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_RSLT_DISCARD_NUM_0_REG + j * 0x80);
+		p[211 + i] = dsaf_read_dev(ddev,
+			DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + j * 0x80);
+		p[214 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_VOQ_OVER_NUM_0_REG + j * 0x80);
+		p[217 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_BD_SAVE_STATUS_0_REG + j * 4);
+		p[220 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
+		p[223 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
+		p[224 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
+	}
+
+	p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
+
+	for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
+		j = i * DSAF_COMM_CHN + port;
+		p[228 + i] = dsaf_read_dev(ddev,
+				DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
+	}
+
+	p[231] = dsaf_read_dev(ddev,
+		DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4);
+
+	/* dsaf sbm registers */
+	for (i = 0; i < DSAF_SBM_NUM / DSAF_COMM_CHN; i++) {
+		j = i * DSAF_COMM_CHN + port;
+		p[232 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_CFG_REG_0_REG + j * 0x80);
+		p[235 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
+		p[238 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
+		p[241 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
+		p[244 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
+		p[245 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
+		p[248 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
+		p[251 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
+		p[254 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
+		p[257 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
+		p[260 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_INER_ST_0_REG + j * 0x80);
+		p[263 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
+		p[266 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
+		p[269 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
+		p[272 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
+		p[275 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
+		p[278 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
+		p[281 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
+		p[284 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
+		p[287 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
+		p[290 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
+		p[293 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
+		p[296 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
+		p[299 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
+		p[302 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
+		p[305 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
+		p[308 + i] = dsaf_read_dev(ddev,
+				DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
+	}
+
+	/* dsaf onode registers */
+	for (i = 0; i < DSAF_XOD_NUM; i++) {
+		p[311 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
+		p[319 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
+		p[327 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
+		p[335 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
+		p[343 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
+		p[351 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
+	}
+
+	p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
+	p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
+	p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
+
+	for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
+		j = i * DSAF_COMM_CHN + port;
+		p[362 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_GNT_L_0_REG + j * 0x90);
+		p[365 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_GNT_H_0_REG + j * 0x90);
+		p[368 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
+		p[371 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
+		p[374 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
+		p[377 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
+		p[380 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
+		p[383 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
+		p[386 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
+		p[389 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
+	}
+
+	p[392] = dsaf_read_dev(ddev,
+		DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
+	p[393] = dsaf_read_dev(ddev,
+		DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
+	p[394] = dsaf_read_dev(ddev,
+		DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
+	p[395] = dsaf_read_dev(ddev,
+		DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
+	p[396] = dsaf_read_dev(ddev,
+		DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
+	p[397] = dsaf_read_dev(ddev,
+		DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
+	p[398] = dsaf_read_dev(ddev,
+		DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
+	p[399] = dsaf_read_dev(ddev,
+		DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
+	p[400] = dsaf_read_dev(ddev,
+		DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
+	p[401] = dsaf_read_dev(ddev,
+		DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
+	p[402] = dsaf_read_dev(ddev,
+		DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
+	p[403] = dsaf_read_dev(ddev,
+		DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
+	p[404] = dsaf_read_dev(ddev,
+		DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
+
+	/* dsaf voq registers */
+	for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
+		j = (i * DSAF_COMM_CHN + port) * 0x90;
+		p[405 + i] = dsaf_read_dev(ddev,
+			DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
+		p[408 + i] = dsaf_read_dev(ddev,
+			DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
+		p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
+		p[414 + i] = dsaf_read_dev(ddev,
+			DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
+		p[417 + i] = dsaf_read_dev(ddev,
+			DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
+		p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
+		p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
+		p[426 + i] = dsaf_read_dev(ddev,
+			DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
+		p[429 + i] = dsaf_read_dev(ddev,
+			DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
+		p[432 + i] = dsaf_read_dev(ddev,
+			DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
+		p[435 + i] = dsaf_read_dev(ddev,
+			DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
+		p[438 + i] = dsaf_read_dev(ddev,
+			DSAF_VOQ_BP_ALL_THRD_0_REG + j);
+	}
+
+	/* dsaf tbl registers */
+	p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
+	p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
+	p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
+	p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
+	p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
+	p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
+	p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
+	p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
+	p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+	p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
+	p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
+	p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
+	p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
+	p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+	p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
+	p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+	p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+	p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+	p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+	p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+	p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+	p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+	p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
+
+	for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
+		j = i * 0x8;
+		p[464 + 2 * i] = dsaf_read_dev(ddev,
+			DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
+		p[465 + 2 * i] = dsaf_read_dev(ddev,
+			DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
+	}
+
+	p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
+	p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
+	p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
+	p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
+	p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
+	p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
+	p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
+	p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
+	p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
+	p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
+	p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
+	p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
+
+	/* dsaf other registers */
+	p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
+	p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
+	p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
+	p[495] = dsaf_read_dev(ddev,
+		DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
+	p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
+	p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+
+	/* mark end of dsaf regs */
+	for (i = 498; i < 504; i++)
+		p[i] = 0xdddddddd;
+}
+
+static char *hns_dsaf_get_node_stats_strings(char *data, int node)
+{
+	char *buff = data;
+
+	snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
+	buff = buff + ETH_GSTRING_LEN;
+
+	return buff;
+}
+
+static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
+				    int node_num)
+{
+	u64 *p = data;
+	struct dsaf_hw_stats *hw_stats = &ddev->hw_stats[node_num];
+
+	p[0] = hw_stats->pad_drop;
+	p[1] = hw_stats->man_pkts;
+	p[2] = hw_stats->rx_pkts;
+	p[3] = hw_stats->rx_pkt_id;
+	p[4] = hw_stats->rx_pause_frame;
+	p[5] = hw_stats->release_buf_num;
+	p[6] = hw_stats->sbm_drop;
+	p[7] = hw_stats->crc_false;
+	p[8] = hw_stats->bp_drop;
+	p[9] = hw_stats->rslt_drop;
+	p[10] = hw_stats->local_addr_false;
+	p[11] = hw_stats->vlan_drop;
+	p[12] = hw_stats->stp_drop;
+	p[13] = hw_stats->tx_pkts;
+
+	return &p[14];
+}
+
+/**
+ *hns_dsaf_get_stats - get dsaf statistic
+ *@ddev: dsaf device
+ *@data: statistic value
+ *@port: port num
+ */
+void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port)
+{
+	u64 *p = data;
+	int node_num = port;
+
+	/* for ge/xge node info */
+	p = hns_dsaf_get_node_stats(ddev, p, node_num);
+
+	/* for ppe node info */
+	node_num = port + DSAF_PPE_INODE_BASE;
+	(void)hns_dsaf_get_node_stats(ddev, p, node_num);
+}
+
+/**
+ *hns_dsaf_get_sset_count - get dsaf string set count
+ *@stringset: type of values in data
+ *return dsaf string name count
+ */
+int hns_dsaf_get_sset_count(int stringset)
+{
+	if (stringset == ETH_SS_STATS)
+		return DSAF_STATIC_NUM;
+
+	return 0;
+}
+
+/**
+ *hns_dsaf_get_strings - get dsaf string set
+ *@stringset: string set index
+ *@data: strings name value
+ *@port:port index
+ */
+void hns_dsaf_get_strings(int stringset, u8 *data, int port)
+{
+	char *buff = (char *)data;
+	int node = port;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	/* for ge/xge node info */
+	buff = hns_dsaf_get_node_stats_strings(buff, node);
+
+	/* for ppe node info */
+	node = port + DSAF_PPE_INODE_BASE;
+	(void)hns_dsaf_get_node_stats_strings(buff, node);
+}
+
+/**
+ *hns_dsaf_get_regs_count - get dsaf regs count
+ *return dsaf regs count
+ */
+int hns_dsaf_get_regs_count(void)
+{
+	return DSAF_DUMP_REGS_NUM;
+}
+
+/**
+ * hns_dsaf_probe - probe dsaf dev
+ * @pdev: dsaf platform device
+ * return 0 - success, negative - fail
+ */
+static int hns_dsaf_probe(struct platform_device *pdev)
+{
+	struct dsaf_device *dsaf_dev;
+	int ret;
+
+	dsaf_dev = hns_dsaf_alloc_dev(&pdev->dev, sizeof(struct dsaf_drv_priv));
+	if (IS_ERR(dsaf_dev)) {
+		ret = PTR_ERR(dsaf_dev);
+		dev_err(&pdev->dev,
+			"dsaf_probe dsaf_alloc_dev failed, ret = %#x!\n", ret);
+		return ret;
+	}
+
+	ret = hns_dsaf_get_cfg(dsaf_dev);
+	if (ret)
+		goto free_dev;
+
+	ret = hns_dsaf_init(dsaf_dev);
+	if (ret)
+		goto free_cfg;
+
+	ret = hns_mac_init(dsaf_dev);
+	if (ret)
+		goto uninit_dsaf;
+
+	ret = hns_ppe_init(dsaf_dev);
+	if (ret)
+		goto uninit_mac;
+
+	ret = hns_dsaf_ae_init(dsaf_dev);
+	if (ret)
+		goto uninit_ppe;
+
+	return 0;
+
+uninit_ppe:
+	hns_ppe_uninit(dsaf_dev);
+
+uninit_mac:
+	hns_mac_uninit(dsaf_dev);
+
+uninit_dsaf:
+	hns_dsaf_free(dsaf_dev);
+
+free_cfg:
+	hns_dsaf_free_cfg(dsaf_dev);
+
+free_dev:
+	hns_dsaf_free_dev(dsaf_dev);
+
+	return ret;
+}
+
+/**
+ * hns_dsaf_remove - remove dsaf dev
+ * @pdev: dsaf platform device
+ */
+static int hns_dsaf_remove(struct platform_device *pdev)
+{
+	struct dsaf_device *dsaf_dev = dev_get_drvdata(&pdev->dev);
+
+	hns_dsaf_ae_uninit(dsaf_dev);
+
+	hns_ppe_uninit(dsaf_dev);
+
+	hns_mac_uninit(dsaf_dev);
+
+	hns_dsaf_free(dsaf_dev);
+
+	hns_dsaf_free_cfg(dsaf_dev);
+
+	hns_dsaf_free_dev(dsaf_dev);
+
+	return 0;
+}
+
+static const struct of_device_id g_dsaf_match[] = {
+	{.compatible = "hisilicon,hns-dsaf-v1"},
+	{.compatible = "hisilicon,hns-dsaf-v2"},
+	{}
+};
+
+static struct platform_driver g_dsaf_driver = {
+	.probe = hns_dsaf_probe,
+	.remove = hns_dsaf_remove,
+	.driver = {
+		.name = DSAF_DRV_NAME,
+		.of_match_table = g_dsaf_match,
+	},
+};
+
+module_platform_driver(g_dsaf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HNS DSAF driver");
+MODULE_VERSION(DSAF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
new file mode 100644
index 0000000..315b07e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -0,0 +1,427 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HNS_DSAF_MAIN_H
+#define __HNS_DSAF_MAIN_H
+#include "hnae.h"
+
+#include "hns_dsaf_reg.h"
+#include "hns_dsaf_mac.h"
+
+struct hns_mac_cb;
+
+#define DSAF_DRV_NAME "hns_dsaf"
+#define DSAF_MOD_VERSION "v1.0"
+
+#define ENABLE		(0x1)
+#define DISABLE		(0x0)
+
+#define HNS_DSAF_DEBUG_NW_REG_OFFSET (0x100000)
+
+#define DSAF_BASE_INNER_PORT_NUM (127)  /* mac tbl qid*/
+
+#define DSAF_MAX_CHIP_NUM (2)  /*max 2 chips */
+
+#define DSAF_DEFAUTL_QUEUE_NUM_PER_PPE (22)
+
+#define HNS_DSAF_MAX_DESC_CNT (1024)
+#define HNS_DSAF_MIN_DESC_CNT (16)
+
+#define DSAF_INVALID_ENTRY_IDX (0xffff)
+
+#define DSAF_CFG_READ_CNT   (30)
+#define DSAF_SRAM_INIT_FINISH_FLAG (0xff)
+
+#define MAC_NUM_OCTETS_PER_ADDR 6
+
+#define DSAF_DUMP_REGS_NUM 504
+#define DSAF_STATIC_NUM 28
+
+#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+
+enum hal_dsaf_mode {
+	HRD_DSAF_NO_DSAF_MODE	= 0x0,
+	HRD_DSAF_MODE		= 0x1,
+};
+
+enum hal_dsaf_tc_mode {
+	HRD_DSAF_4TC_MODE		= 0X0,
+	HRD_DSAF_8TC_MODE		= 0X1,
+};
+
+struct dsaf_vm_def_vlan {
+	u32 vm_def_vlan_id;
+	u32 vm_def_vlan_cfi;
+	u32 vm_def_vlan_pri;
+};
+
+struct dsaf_tbl_tcam_data {
+	u32 tbl_tcam_data_high;
+	u32 tbl_tcam_data_low;
+};
+
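+/* number of u32 words needed to hold one mask bit per destination
+ * (queues plus service network ports)
+ */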
+#define DSAF_PORT_MSK_NUM \
+	((DSAF_TOTAL_QUEUE_NUM + DSAF_SERVICE_NW_NUM - 1) / 32 + 1)
+struct dsaf_tbl_tcam_mcast_cfg {
+	u8 tbl_mcast_old_en;
+	u8 tbl_mcast_item_vld;
+	u32 tbl_mcast_port_msk[DSAF_PORT_MSK_NUM];
+};
+
+struct dsaf_tbl_tcam_ucast_cfg {
+	u32 tbl_ucast_old_en;
+	u32 tbl_ucast_item_vld;
+	u32 tbl_ucast_mac_discard;
+	u32 tbl_ucast_dvc;
+	u32 tbl_ucast_out_port;
+};
+
+struct dsaf_tbl_line_cfg {
+	u32 tbl_line_mac_discard;
+	u32 tbl_line_dvc;
+	u32 tbl_line_out_port;
+};
+
+enum dsaf_port_rate_mode {
+	DSAF_PORT_RATE_1000 = 0,
+	DSAF_PORT_RATE_2500,
+	DSAF_PORT_RATE_10000
+};
+
+enum dsaf_stp_port_type {
+	DSAF_STP_PORT_TYPE_DISCARD = 0,
+	DSAF_STP_PORT_TYPE_BLOCK = 1,
+	DSAF_STP_PORT_TYPE_LISTEN = 2,
+	DSAF_STP_PORT_TYPE_LEARN = 3,
+	DSAF_STP_PORT_TYPE_FORWARD = 4
+};
+
+enum dsaf_sw_port_type {
+	DSAF_SW_PORT_TYPE_NON_VLAN = 0,
+	DSAF_SW_PORT_TYPE_ACCESS = 1,
+	DSAF_SW_PORT_TYPE_TRUNK = 2,
+};
+
+#define DSAF_SUB_BASE_SIZE                        (0x10000)
+
+/* dsaf mode define */
+enum dsaf_mode {
+	DSAF_MODE_INVALID = 0,	/**< Invalid dsaf mode */
+	DSAF_MODE_ENABLE_FIX,	/**< en DSAF-mode, fixed to queue*/
+	DSAF_MODE_ENABLE_0VM,	/**< en DSAF-mode, support 0 VM */
+	DSAF_MODE_ENABLE_8VM,	/**< en DSAF-mode, support 8 VM */
+	DSAF_MODE_ENABLE_16VM,	/**< en DSAF-mode, support 16 VM */
+	DSAF_MODE_ENABLE_32VM,	/**< en DSAF-mode, support 32 VM */
+	DSAF_MODE_ENABLE_128VM,	/**< en DSAF-mode, support 128 VM */
+	DSAF_MODE_ENABLE,		/**< the modes above enable DSAF */
+	DSAF_MODE_DISABLE_FIX,	/**< non-dsaf, fixed to queue*/
+	DSAF_MODE_DISABLE_2PORT_8VM,	/**< non-dsaf, 2port 8VM */
+	DSAF_MODE_DISABLE_2PORT_16VM,	/**< non-dsaf, 2port 16VM */
+	DSAF_MODE_DISABLE_2PORT_64VM,	/**< non-dsaf, 2port 64VM */
+	DSAF_MODE_DISABLE_6PORT_0VM,	/**< non-dsaf, 6port 0VM */
+	DSAF_MODE_DISABLE_6PORT_2VM,	/**< non-dsaf, 6port 2VM */
+	DSAF_MODE_DISABLE_6PORT_4VM,	/**< non-dsaf, 6port 4VM */
+	DSAF_MODE_DISABLE_6PORT_16VM,	/**< non-dsaf, 6port 16VM */
+	DSAF_MODE_MAX		/**< the last one, use as the num */
+};
+
+#define DSAF_DEST_PORT_NUM 256	/* DSAF max port num */
+#define DSAF_WORD_BIT_CNT 32  /* the num bit of word */
+
+/*mac entry, mc or uc entry*/
+struct dsaf_drv_mac_single_dest_entry {
+	/* mac addr, match the entry*/
+	u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+	u16 in_vlan_id; /* value of VlanId */
+
+	/* the valid input port num: fixed to 0 in dsaf mode, */
+	/*	in non-dsaf mode it is the port for which the entry is valid */
+	u8 in_port_num;
+
+	u8 port_num; /*output port num*/
+	u8 rsv[6];
+};
+
+/*only mc entry*/
+struct dsaf_drv_mac_multi_dest_entry {
+	/* mac addr, match the entry*/
+	u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+	u16 in_vlan_id;
+	/* output port mask for this mac addr, */
+	/*	bit0-bit5 means Port0-Port5 (1 bit per port) */
+	u32 port_mask[DSAF_DEST_PORT_NUM / DSAF_WORD_BIT_CNT];
+
+	/* the valid input port num: fixed to 0 in dsaf mode, */
+	/*	in non-dsaf mode it is the port for which the entry is valid */
+	u8 in_port_num;
+	u8 rsv[7];
+};
+
+struct dsaf_hw_stats {
+	u64 pad_drop;
+	u64 man_pkts;
+	u64 rx_pkts;
+	u64 rx_pkt_id;
+	u64 rx_pause_frame;
+	u64 release_buf_num;
+	u64 sbm_drop;
+	u64 crc_false;
+	u64 bp_drop;
+	u64 rslt_drop;
+	u64 local_addr_false;
+	u64 vlan_drop;
+	u64 stp_drop;
+	u64 tx_pkts;
+};
+
+struct hnae_vf_cb {
+	u8 port_index;
+	struct hns_mac_cb *mac_cb;
+	struct dsaf_device *dsaf_dev;
+	struct hnae_handle  ae_handle; /* must be the last member */
+};
+
+struct dsaf_int_xge_src {
+	u32    xid_xge_ecc_err_int_src;
+	u32    xid_xge_fsm_timout_int_src;
+	u32    sbm_xge_lnk_fsm_timout_int_src;
+	u32    sbm_xge_lnk_ecc_2bit_int_src;
+	u32    sbm_xge_mib_req_failed_int_src;
+	u32    sbm_xge_mib_req_fsm_timout_int_src;
+	u32    sbm_xge_mib_rels_fsm_timout_int_src;
+	u32    sbm_xge_sram_ecc_2bit_int_src;
+	u32    sbm_xge_mib_buf_sum_err_int_src;
+	u32    sbm_xge_mib_req_extra_int_src;
+	u32    sbm_xge_mib_rels_extra_int_src;
+	u32    voq_xge_start_to_over_0_int_src;
+	u32    voq_xge_start_to_over_1_int_src;
+	u32    voq_xge_ecc_err_int_src;
+};
+
+struct dsaf_int_ppe_src {
+	u32    xid_ppe_fsm_timout_int_src;
+	u32    sbm_ppe_lnk_fsm_timout_int_src;
+	u32    sbm_ppe_lnk_ecc_2bit_int_src;
+	u32    sbm_ppe_mib_req_failed_int_src;
+	u32    sbm_ppe_mib_req_fsm_timout_int_src;
+	u32    sbm_ppe_mib_rels_fsm_timout_int_src;
+	u32    sbm_ppe_sram_ecc_2bit_int_src;
+	u32    sbm_ppe_mib_buf_sum_err_int_src;
+	u32    sbm_ppe_mib_req_extra_int_src;
+	u32    sbm_ppe_mib_rels_extra_int_src;
+	u32    voq_ppe_start_to_over_0_int_src;
+	u32    voq_ppe_ecc_err_int_src;
+	u32    xod_ppe_fifo_rd_empty_int_src;
+	u32    xod_ppe_fifo_wr_full_int_src;
+};
+
+struct dsaf_int_rocee_src {
+	u32    xid_rocee_fsm_timout_int_src;
+	u32    sbm_rocee_lnk_fsm_timout_int_src;
+	u32    sbm_rocee_lnk_ecc_2bit_int_src;
+	u32    sbm_rocee_mib_req_failed_int_src;
+	u32    sbm_rocee_mib_req_fsm_timout_int_src;
+	u32    sbm_rocee_mib_rels_fsm_timout_int_src;
+	u32    sbm_rocee_sram_ecc_2bit_int_src;
+	u32    sbm_rocee_mib_buf_sum_err_int_src;
+	u32    sbm_rocee_mib_req_extra_int_src;
+	u32    sbm_rocee_mib_rels_extra_int_src;
+	u32    voq_rocee_start_to_over_0_int_src;
+	u32    voq_rocee_ecc_err_int_src;
+};
+
+struct dsaf_int_tbl_src {
+	u32    tbl_da0_mis_src;
+	u32    tbl_da1_mis_src;
+	u32    tbl_da2_mis_src;
+	u32    tbl_da3_mis_src;
+	u32    tbl_da4_mis_src;
+	u32    tbl_da5_mis_src;
+	u32    tbl_da6_mis_src;
+	u32    tbl_da7_mis_src;
+	u32    tbl_sa_mis_src;
+	u32    tbl_old_sech_end_src;
+	u32    lram_ecc_err1_src;
+	u32    lram_ecc_err2_src;
+	u32    tram_ecc_err1_src;
+	u32    tram_ecc_err2_src;
+	u32    tbl_ucast_bcast_xge0_src;
+	u32    tbl_ucast_bcast_xge1_src;
+	u32    tbl_ucast_bcast_xge2_src;
+	u32    tbl_ucast_bcast_xge3_src;
+	u32    tbl_ucast_bcast_xge4_src;
+	u32    tbl_ucast_bcast_xge5_src;
+	u32    tbl_ucast_bcast_ppe_src;
+	u32    tbl_ucast_bcast_rocee_src;
+};
+
+struct dsaf_int_stat {
+	struct dsaf_int_xge_src dsaf_int_xge_stat[DSAF_COMM_CHN];
+	struct dsaf_int_ppe_src dsaf_int_ppe_stat[DSAF_COMM_CHN];
+	struct dsaf_int_rocee_src dsaf_int_rocee_stat[DSAF_COMM_CHN];
+	struct dsaf_int_tbl_src dsaf_int_tbl_stat[1];
+
+};
+
+/* DSAF device struct define, and mac -> dsaf */
+struct dsaf_device {
+	struct device *dev;
+	struct hnae_ae_dev ae_dev;
+
+	void *priv;
+
+	int virq[DSAF_IRQ_NUM];
+
+	u8 __iomem *sc_base;
+	u8 __iomem *sds_base;
+	u8 __iomem *ppe_base;
+	u8 __iomem *io_base;
+	u8 __iomem *cpld_base;
+
+	u32 desc_num; /*  desc num per queue*/
+	u32 buf_size; /*  ring buffer size */
+	int buf_size_type; /* ring buffer size-type */
+	enum dsaf_mode dsaf_mode;	 /* dsaf mode  */
+	enum hal_dsaf_mode dsaf_en;
+	enum hal_dsaf_tc_mode dsaf_tc_mode;
+	u32 dsaf_ver;
+
+	struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
+	struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
+	struct hns_mac_cb *mac_cb;
+
+	struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
+	struct dsaf_int_stat int_stat;
+};
+
+static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev)
+{
+	return (void *)((u8 *)dsaf_dev + sizeof(*dsaf_dev));
+}
+
+struct dsaf_drv_tbl_tcam_key {
+	union {
+		struct {
+			u8 mac_3;
+			u8 mac_2;
+			u8 mac_1;
+			u8 mac_0;
+		} bits;
+
+		u32 val;
+	} high;
+	union {
+		struct {
+			u32 port:4; /* port id, */
+			/* dsaf-mode fixed 0, non-dsaf-mode port id*/
+			u32 vlan:12; /* vlan id */
+			u32 mac_5:8;
+			u32 mac_4:8;
+		} bits;
+
+		u32 val;
+	} low;
+};
+
+struct dsaf_drv_soft_mac_tbl {
+	struct dsaf_drv_tbl_tcam_key tcam_key;
+	u16 index; /*the entry's index in tcam tab*/
+};
+
+struct dsaf_drv_priv {
+	/* soft tab Mac key, for hardware tab*/
+	struct dsaf_drv_soft_mac_tbl *soft_mac_tbl;
+};
+
+static inline void hns_dsaf_tbl_tcam_addr_cfg(struct dsaf_device *dsaf_dev,
+					      u32 tab_tcam_addr)
+{
+	dsaf_set_dev_field(dsaf_dev, DSAF_TBL_TCAM_ADDR_0_REG,
+			   DSAF_TBL_TCAM_ADDR_M, DSAF_TBL_TCAM_ADDR_S,
+			   tab_tcam_addr);
+}
+
+static inline void hns_dsaf_tbl_tcam_load_pul(struct dsaf_device *dsaf_dev)
+{
+	u32 o_tbl_pul;
+
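+	/* pulse the TCAM load bit: write it high then low to trigger a load */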
+	o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+	dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_LOAD_S, 1);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+	dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_LOAD_S, 0);
+	dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+static inline void hns_dsaf_tbl_line_addr_cfg(struct dsaf_device *dsaf_dev,
+					      u32 tab_line_addr)
+{
+	dsaf_set_dev_field(dsaf_dev, DSAF_TBL_LINE_ADDR_0_REG,
+			   DSAF_TBL_LINE_ADDR_M, DSAF_TBL_LINE_ADDR_S,
+			   tab_line_addr);
+}
+
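+/* ports below DSAF_COMM_CHN and port DSAF_MAX_PORT_NUM_PER_CHIP map to
+ * common index 0; other ports map to (port - DSAF_COMM_CHN + 1)
+ */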
+static inline int hns_dsaf_get_comm_idx_by_port(int port)
+{
+	if ((port < DSAF_COMM_CHN) || (port == DSAF_MAX_PORT_NUM_PER_CHIP))
+		return 0;
+	else
+		return (port - DSAF_COMM_CHN + 1);
+}
+
+static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
+	struct hnae_handle *handle)
+{
+	return container_of(handle, struct hnae_vf_cb, ae_handle);
+}
+
+int hns_dsaf_set_mac_uc_entry(struct dsaf_device *dsaf_dev,
+			      struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_set_mac_mc_entry(struct dsaf_device *dsaf_dev,
+			      struct dsaf_drv_mac_multi_dest_entry *mac_entry);
+int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
+			     struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
+			   u8 in_port_num, u8 *addr);
+int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
+			     struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
+			      struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
+			      struct dsaf_drv_mac_multi_dest_entry *mac_entry);
+int hns_dsaf_get_mac_entry_by_index(
+	struct dsaf_device *dsaf_dev,
+	u16 entry_index,
+	struct dsaf_drv_mac_multi_dest_entry *mac_entry);
+
+void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val);
+
+void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
+
+void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val);
+
+void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
+
+int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
+void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
+
+void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
+void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
+void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
+				    u32 port, u32 val);
+
+void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
+
+int hns_dsaf_get_sset_count(int stringset);
+void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port);
+void hns_dsaf_get_strings(int stringset, u8 *data, int port);
+
+void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
+int hns_dsaf_get_regs_count(void);
+
+#endif /* __HNS_DSAF_MAIN_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
new file mode 100644
index 0000000..d611388
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "hns_dsaf_misc.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_reg.h"
+#include "hns_dsaf_ppe.h"
+
+void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
+		      u16 speed, int data)
+{
+	int speed_reg = 0;
+	u8 value;
+
+	if (!mac_cb) {
+		pr_err("sfp_led_opt mac_dev is null!\n");
+		return;
+	}
+	if (!mac_cb->cpld_vaddr) {
+		dev_err(mac_cb->dev, "mac_id=%d, cpld_vaddr is null !\n",
+			mac_cb->mac_id);
+		return;
+	}
+
+	if (speed == MAC_SPEED_10000)
+		speed_reg = 1;
+
+	value = mac_cb->cpld_led_value;
+
+	if (link_status) {
+		dsaf_set_bit(value, DSAF_LED_LINK_B, link_status);
+		dsaf_set_field(value, DSAF_LED_SPEED_M,
+			       DSAF_LED_SPEED_S, speed_reg);
+		dsaf_set_bit(value, DSAF_LED_DATA_B, data);
+
+		if (value != mac_cb->cpld_led_value) {
+			dsaf_write_b(mac_cb->cpld_vaddr, value);
+			mac_cb->cpld_led_value = value;
+		}
+	} else {
+		dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+		mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
+	}
+}
+
+void cpld_led_reset(struct hns_mac_cb *mac_cb)
+{
+	if (!mac_cb || !mac_cb->cpld_vaddr)
+		return;
+
+	dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+	mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
+}
+
+int cpld_set_led_id(struct hns_mac_cb *mac_cb,
+		    enum hnae_led_state status)
+{
+	switch (status) {
+	case HNAE_LED_ACTIVE:
+		mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr);
+		return 2;
+	case HNAE_LED_ON:
+		dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
+			     CPLD_LED_ON_VALUE);
+		dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+		break;
+	case HNAE_LED_OFF:
+		dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
+			     CPLD_LED_DEFAULT_VALUE);
+		dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+		break;
+	case HNAE_LED_INACTIVE:
+		dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
+			     CPLD_LED_DEFAULT_VALUE);
+		dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+#define RESET_REQ_OR_DREQ 1
+
+void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
+{
+	u32 xbar_reg_addr;
+	u32 nt_reg_addr;
+
+	if (!val) {
+		xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG;
+		nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG;
+	} else {
+		xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_DREQ_REG;
+		nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG;
+	}
+
+	dsaf_write_reg(dsaf_dev->sc_base, xbar_reg_addr,
+		       RESET_REQ_OR_DREQ);
+	dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr,
+		       RESET_REQ_OR_DREQ);
+}
+
+void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+{
+	u32 reg_val = 0;
+	u32 reg_addr;
+
+	if (port >= DSAF_XGE_NUM)
+		return;
+
+	reg_val |= RESET_REQ_OR_DREQ;
+	reg_val |= 0x2082082 << port;
+
+	if (val == 0)
+		reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
+	else
+		reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
+
+	dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
+				    u32 port, u32 val)
+{
+	u32 reg_val = 0;
+	u32 reg_addr;
+
+	if (port >= DSAF_XGE_NUM)
+		return;
+
+	reg_val |= XGMAC_TRX_CORE_SRST_M << port;
+
+	if (val == 0)
+		reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
+	else
+		reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
+
+	dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+{
+	u32 reg_val_1;
+	u32 reg_val_2;
+
+	if (port >= DSAF_GE_NUM)
+		return;
+
+	if (port < DSAF_SERVICE_NW_NUM) {
+		reg_val_1  = 0x1 << port;
+		reg_val_2  = 0x1041041 << port;
+
+		if (val == 0) {
+			dsaf_write_reg(dsaf_dev->sc_base,
+				       DSAF_SUB_SC_GE_RESET_REQ1_REG,
+				       reg_val_1);
+
+			dsaf_write_reg(dsaf_dev->sc_base,
+				       DSAF_SUB_SC_GE_RESET_REQ0_REG,
+				       reg_val_2);
+		} else {
+			dsaf_write_reg(dsaf_dev->sc_base,
+				       DSAF_SUB_SC_GE_RESET_DREQ0_REG,
+				       reg_val_2);
+
+			dsaf_write_reg(dsaf_dev->sc_base,
+				       DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+				       reg_val_1);
+		}
+	} else {
+		reg_val_1 = 0x15540 << (port - 6);
+		reg_val_2 = 0x100 << (port - 6);
+
+		if (val == 0) {
+			dsaf_write_reg(dsaf_dev->sc_base,
+				       DSAF_SUB_SC_GE_RESET_REQ1_REG,
+				       reg_val_1);
+
+			dsaf_write_reg(dsaf_dev->sc_base,
+				       DSAF_SUB_SC_PPE_RESET_REQ_REG,
+				       reg_val_2);
+		} else {
+			dsaf_write_reg(dsaf_dev->sc_base,
+				       DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+				       reg_val_1);
+
+			dsaf_write_reg(dsaf_dev->sc_base,
+				       DSAF_SUB_SC_PPE_RESET_DREQ_REG,
+				       reg_val_2);
+		}
+	}
+}
+
+void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+{
+	u32 reg_val = 0;
+	u32 reg_addr;
+
+	reg_val |= RESET_REQ_OR_DREQ << port;
+
+	if (val == 0)
+		reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
+	else
+		reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
+
+	dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
+{
+	int comm_index = ppe_common->comm_index;
+	struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
+	u32 reg_val;
+	u32 reg_addr;
+
+	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+		reg_val = RESET_REQ_OR_DREQ;
+		if (val == 0)
+			reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
+		else
+			reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
+
+	} else {
+		reg_val = 0x100 << (comm_index - 1);
+
+		if (val == 0)
+			reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
+		else
+			reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
+	}
+
+	dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+/**
+ * hns_mac_get_phy_if - get phy interface from serdes mode
+ * @mac_cb: mac control block
+ * return phy interface
+ */
+phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
+{
+	u32 hilink3_mode;
+	u32 hilink4_mode;
+	void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
+	int dev_id = mac_cb->mac_id;
+	phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+
+	hilink3_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK3_REG);
+	hilink4_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK4_REG);
+	if (dev_id >= 0 && dev_id <= 3) {
+		if (hilink4_mode == 0)
+			phy_if = PHY_INTERFACE_MODE_SGMII;
+		else
+			phy_if = PHY_INTERFACE_MODE_XGMII;
+	} else if (dev_id >= 4 && dev_id <= 5) {
+		if (hilink3_mode == 0)
+			phy_if = PHY_INTERFACE_MODE_SGMII;
+		else
+			phy_if = PHY_INTERFACE_MODE_XGMII;
+	} else {
+		phy_if = PHY_INTERFACE_MODE_SGMII;
+	}
+
+	dev_dbg(mac_cb->dev,
+		"hilink3_mode=%d, hilink4_mode=%d dev_id=%d, phy_if=%d\n",
+		hilink3_mode, hilink4_mode, dev_id, phy_if);
+	return phy_if;
+}
+
+/**
+ * hns_mac_config_sds_loopback - set loop back for serdes
+ * @mac_cb: mac control block
+ * @en: enable (1) or disable (0) the serdes loopback
+ * return 0 on success
+ */
+int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
+{
+	/* port 0-3 hilink4 base is serdes_vaddr + 0x00280000
+	 * port 4-7 hilink3 base is serdes_vaddr + 0x00200000
+	 */
+	u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+		       (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
+	const u8 lane_id[] = {
+		0,	/* mac 0 -> lane 0 */
+		1,	/* mac 1 -> lane 1 */
+		2,	/* mac 2 -> lane 2 */
+		3,	/* mac 3 -> lane 3 */
+		2,	/* mac 4 -> lane 2 */
+		3,	/* mac 5 -> lane 3 */
+		0,	/* mac 6 -> lane 0 */
+		1	/* mac 7 -> lane 1 */
+	};
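+	/* RX_CSR(lane, reg) is the byte offset of RX CSR <reg> for a serdes
+	 * lane: the raw index (0x4080 + reg * 2 + lane * 0x200) is doubled,
+	 * giving 4 bytes per register and a 0x400-byte stride per lane
+	 */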
+#define RX_CSR(lane, reg) ((0x4080 + (reg) * 0x0002 + (lane) * 0x0200) * 2)
+	u64 reg_offset = RX_CSR(lane_id[mac_cb->mac_id], 0);
+
+	int sfp_prsnt;
+	int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
+
+	if (!mac_cb->phy_node) {
+		if (ret)
+			pr_info("please confirm sfp is present or not\n");
+		else if (!sfp_prsnt)
+			pr_info("no sfp in this eth\n");
+	}
+
+	dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
new file mode 100644
index 0000000..419f07a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_MISC_H
+#define _HNS_DSAF_MISC_H
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_mac.h"
+
+#define CPLD_ADDR_PORT_OFFSET	0x4
+
+#define HS_LED_ON		0xE
+#define HS_LED_OFF		0xF
+
+#define CPLD_LED_ON_VALUE	1
+#define CPLD_LED_DEFAULT_VALUE	0
+
+#define MAC_SFP_PORT_OFFSET	0x2
+
+#define DSAF_LED_SPEED_S 0
+#define DSAF_LED_SPEED_M (0x3 << DSAF_LED_SPEED_S)
+
+#define DSAF_LED_LINK_B 2
+#define DSAF_LED_DATA_B 4
+#define DSAF_LED_ANCHOR_B 5
+
+void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
+		      u16 speed, int data);
+void cpld_led_reset(struct hns_mac_cb *mac_cb);
+int cpld_set_led_id(struct hns_mac_cb *mac_cb,
+		    enum hnae_led_state status);
+int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
new file mode 100644
index 0000000..67f33f1
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -0,0 +1,583 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include "hns_dsaf_ppe.h"
+
+static void __iomem *hns_ppe_common_get_ioaddr(
+	struct ppe_common_cb *ppe_common)
+{
+	void __iomem *base_addr;
+
+	int idx = ppe_common->comm_index;
+
+	if (idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
+		base_addr = ppe_common->dsaf_dev->ppe_base
+			+ PPE_COMMON_REG_OFFSET;
+	else
+		base_addr = ppe_common->dsaf_dev->sds_base
+			+ (idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
+			+ PPE_COMMON_REG_OFFSET;
+
+	return base_addr;
+}
+
+/**
+ * hns_ppe_common_get_cfg - get ppe common config
+ * @dsaf_dev: dsaf device
+ * @comm_index: common index
+ * return 0 on success, negative on failure
+ */
+int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
+{
+	struct ppe_common_cb *ppe_common;
+	int ppe_num;
+
+	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+		ppe_num = HNS_PPE_SERVICE_NW_ENGINE_NUM;
+	else
+		ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM;
+
+	ppe_common = devm_kzalloc(dsaf_dev->dev, sizeof(*ppe_common) +
+		ppe_num * sizeof(struct hns_ppe_cb), GFP_KERNEL);
+	if (!ppe_common)
+		return -ENOMEM;
+
+	ppe_common->ppe_num = ppe_num;
+	ppe_common->dsaf_dev = dsaf_dev;
+	ppe_common->comm_index = comm_index;
+	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+		ppe_common->ppe_mode = PPE_COMMON_MODE_SERVICE;
+	else
+		ppe_common->ppe_mode = PPE_COMMON_MODE_DEBUG;
+	ppe_common->dev = dsaf_dev->dev;
+
+	ppe_common->io_base = hns_ppe_common_get_ioaddr(ppe_common);
+
+	dsaf_dev->ppe_common[comm_index] = ppe_common;
+
+	return 0;
+}
+
+void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
+{
+	dsaf_dev->ppe_common[comm_index] = NULL;
+}
+
+static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+					int ppe_idx)
+{
+	void __iomem *base_addr;
+	int common_idx = ppe_common->comm_index;
+
+	if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
+		base_addr = ppe_common->dsaf_dev->ppe_base +
+			ppe_idx * PPE_REG_OFFSET;
+
+	} else {
+		base_addr = ppe_common->dsaf_dev->sds_base +
+			(common_idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET;
+	}
+
+	return base_addr;
+}
+
+static int hns_ppe_get_port(struct ppe_common_cb *ppe_common, int idx)
+{
+	int port;
+
+	if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE)
+		port = idx;
+	else
+		port = HNS_PPE_SERVICE_NW_ENGINE_NUM
+			+ ppe_common->comm_index - 1;
+
+	return port;
+}
+
+static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
+{
+	u32 i;
+	struct hns_ppe_cb *ppe_cb;
+	u32 ppe_num = ppe_common->ppe_num;
+
+	for (i = 0; i < ppe_num; i++) {
+		ppe_cb = &ppe_common->ppe_cb[i];
+		ppe_cb->dev = ppe_common->dev;
+		ppe_cb->next = NULL;
+		ppe_cb->ppe_common_cb = ppe_common;
+		ppe_cb->index = i;
+		ppe_cb->port = hns_ppe_get_port(ppe_common, i);
+		ppe_cb->io_base = hns_ppe_get_iobase(ppe_common, i);
+		ppe_cb->virq = 0;
+	}
+}
+
+static void hns_ppe_cnt_clr_ce(struct hns_ppe_cb *ppe_cb)
+{
+	dsaf_set_dev_bit(ppe_cb, PPE_TNL_0_5_CNT_CLR_CE_REG,
+			 PPE_CNT_CLR_CE_B, 1);
+}
+
+/**
+ * hns_ppe_checksum_hw - set ppe checksum calculation
+ * @ppe_cb: ppe control block
+ * @value: value
+ */
+static void hns_ppe_checksum_hw(struct hns_ppe_cb *ppe_cb, u32 value)
+{
+	dsaf_set_dev_field(ppe_cb, PPE_CFG_PRO_CHECK_EN_REG,
+			   0xfffffff, 0, value);
+}
+
+static void hns_ppe_set_qid_mode(struct ppe_common_cb *ppe_common,
+				 enum ppe_qid_mode qid_mode)
+{
+	dsaf_set_dev_field(ppe_common, PPE_COM_CFG_QID_MODE_REG,
+			   PPE_CFG_QID_MODE_CF_QID_MODE_M,
+			   PPE_CFG_QID_MODE_CF_QID_MODE_S, qid_mode);
+}
+
+/**
+ * hns_ppe_set_qid - set ppe qid
+ * @ppe_common: ppe common device
+ * @qid: queue id
+ */
+static void hns_ppe_set_qid(struct ppe_common_cb *ppe_common, u32 qid)
+{
+	u32 qid_mod = dsaf_read_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG);
+
+	if (!dsaf_get_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
+			    PPE_CFG_QID_MODE_DEF_QID_S)) {
+		dsaf_set_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
+			       PPE_CFG_QID_MODE_DEF_QID_S, qid);
+		dsaf_write_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG, qid_mod);
+	}
+}
+
+/**
+ * hns_ppe_set_port_mode - set port mode
+ * @ppe_cb: ppe control block
+ * @mode: port mode
+ */
+static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb,
+				  enum ppe_port_mode mode)
+{
+	dsaf_write_dev(ppe_cb, PPE_CFG_XGE_MODE_REG, mode);
+}
+
+/**
+ * hns_ppe_common_init_hw - init ppe common device
+ * @ppe_common: ppe common device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
+{
+	enum ppe_qid_mode qid_mode;
+	enum dsaf_mode dsaf_mode = ppe_common->dsaf_dev->dsaf_mode;
+
+	hns_ppe_com_srst(ppe_common, 0);
+	mdelay(100);
+	hns_ppe_com_srst(ppe_common, 1);
+	mdelay(100);
+
+	if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
+		switch (dsaf_mode) {
+		case DSAF_MODE_ENABLE_FIX:
+		case DSAF_MODE_DISABLE_FIX:
+			qid_mode = PPE_QID_MODE0;
+			hns_ppe_set_qid(ppe_common, 0);
+			break;
+		case DSAF_MODE_ENABLE_0VM:
+		case DSAF_MODE_DISABLE_2PORT_64VM:
+			qid_mode = PPE_QID_MODE3;
+			break;
+		case DSAF_MODE_ENABLE_8VM:
+		case DSAF_MODE_DISABLE_2PORT_16VM:
+			qid_mode = PPE_QID_MODE4;
+			break;
+		case DSAF_MODE_ENABLE_16VM:
+		case DSAF_MODE_DISABLE_6PORT_0VM:
+			qid_mode = PPE_QID_MODE5;
+			break;
+		case DSAF_MODE_ENABLE_32VM:
+		case DSAF_MODE_DISABLE_6PORT_16VM:
+			qid_mode = PPE_QID_MODE2;
+			break;
+		case DSAF_MODE_ENABLE_128VM:
+		case DSAF_MODE_DISABLE_6PORT_4VM:
+			qid_mode = PPE_QID_MODE1;
+			break;
+		case DSAF_MODE_DISABLE_2PORT_8VM:
+			qid_mode = PPE_QID_MODE7;
+			break;
+		case DSAF_MODE_DISABLE_6PORT_2VM:
+			qid_mode = PPE_QID_MODE6;
+			break;
+		default:
+			dev_err(ppe_common->dev,
+				"get ppe queue mode failed! dsaf_mode=%d\n",
+				dsaf_mode);
+			return -EINVAL;
+		}
+		hns_ppe_set_qid_mode(ppe_common, qid_mode);
+	}
+
+	dsaf_set_dev_bit(ppe_common, PPE_COM_COMMON_CNT_CLR_CE_REG,
+			 PPE_COMMON_CNT_CLR_CE_B, 1);
+
+	return 0;
+}
+
+/*clr ppe exception irq*/
+static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
+{
+	u32 clr_vlue = 0xfffffffful;
+	u32 msk_vlue = en ? 0xfffffffful : 0; /*1 is en, 0 is dis*/
+	u32 vld_msk = 0;
+
+	/*only care bit 0,1,7*/
+	dsaf_set_bit(vld_msk, 0, 1);
+	dsaf_set_bit(vld_msk, 1, 1);
+	dsaf_set_bit(vld_msk, 7, 1);
+
+	/* clear status */
+	dsaf_write_dev(ppe_cb, PPE_RINT_REG, clr_vlue);
+
+	/* some bits are reserved and must stay 0, so only write the valid bits */
+	dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk);
+}
+
+/**
+ * hns_ppe_init_hw - init ppe
+ * @ppe_cb: ppe control block
+ */
+static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
+{
+	struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb;
+	u32 port = ppe_cb->port;
+	struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev;
+
+	hns_ppe_srst_by_port(dsaf_dev, port, 0);
+	mdelay(10);
+	hns_ppe_srst_by_port(dsaf_dev, port, 1);
+
+	/* clr and msk except irq*/
+	hns_ppe_exc_irq_en(ppe_cb, 0);
+
+	if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG)
+		hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE);
+	else
+		hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE);
+	hns_ppe_checksum_hw(ppe_cb, 0xffffffff);
+	hns_ppe_cnt_clr_ce(ppe_cb);
+}
+
+/**
+ * hns_ppe_uninit_hw - uninit ppe
+ * @ppe_cb: ppe control block
+ */
+static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
+{
+	u32 port;
+
+	if (ppe_cb->ppe_common_cb) {
+		port = ppe_cb->index;
+		hns_ppe_srst_by_port(ppe_cb->ppe_common_cb->dsaf_dev, port, 0);
+	}
+}
+
+void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
+{
+	u32 i;
+
+	for (i = 0; i < ppe_common->ppe_num; i++) {
+		hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
+		memset(&ppe_common->ppe_cb[i], 0, sizeof(struct hns_ppe_cb));
+	}
+}
+
+void hns_ppe_uninit(struct dsaf_device *dsaf_dev)
+{
+	u32 i;
+
+	for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+		if (dsaf_dev->ppe_common[i])
+			hns_ppe_uninit_ex(dsaf_dev->ppe_common[i]);
+		hns_rcb_common_free_cfg(dsaf_dev, i);
+		hns_ppe_common_free_cfg(dsaf_dev, i);
+	}
+}
+
+/**
+ * hns_ppe_reset_common - reinit ppe/rcb hw
+ * @dsaf_dev: dsaf device
+ * @ppe_common_index: ppe common index
+ */
+void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index)
+{
+	u32 i;
+	int ret;
+	struct ppe_common_cb *ppe_common;
+
+	ppe_common = dsaf_dev->ppe_common[ppe_common_index];
+	ret = hns_ppe_common_init_hw(ppe_common);
+	if (ret)
+		return;
+
+	ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]);
+	if (ret)
+		return;
+
+	for (i = 0; i < ppe_common->ppe_num; i++)
+		hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+
+	hns_rcb_common_init_commit_hw(dsaf_dev->rcb_common[ppe_common_index]);
+}
+
+void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
+{
+	struct hns_ppe_hw_stats *hw_stats = &ppe_cb->hw_stats;
+
+	hw_stats->rx_pkts_from_sw
+		+= dsaf_read_dev(ppe_cb, PPE_HIS_RX_SW_PKT_CNT_REG);
+	hw_stats->rx_pkts
+		+= dsaf_read_dev(ppe_cb, PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG);
+	hw_stats->rx_drop_no_bd
+		+= dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_NO_BUF_CNT_REG);
+	hw_stats->rx_alloc_buf_fail
+		+= dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_FAIL_CNT_REG);
+	hw_stats->rx_alloc_buf_wait
+		+= dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_WAIT_CNT_REG);
+	hw_stats->rx_drop_no_buf
+		+= dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_FUL_CNT_REG);
+	hw_stats->rx_err_fifo_full
+		+= dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_PRT_CNT_REG);
+
+	hw_stats->tx_bd_form_rcb
+		+= dsaf_read_dev(ppe_cb, PPE_HIS_TX_BD_CNT_REG);
+	hw_stats->tx_pkts_from_rcb
+		+= dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CNT_REG);
+	hw_stats->tx_pkts
+		+= dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_OK_CNT_REG);
+	hw_stats->tx_err_fifo_empty
+		+= dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_EPT_CNT_REG);
+	hw_stats->tx_err_checksum
+		+= dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CS_FAIL_CNT_REG);
+}
+
+int hns_ppe_get_sset_count(int stringset)
+{
+	if (stringset == ETH_SS_STATS)
+		return ETH_PPE_STATIC_NUM;
+	return 0;
+}
+
+int hns_ppe_get_regs_count(void)
+{
+	return ETH_PPE_DUMP_NUM;
+}
+
+/**
+ * hns_ppe_get_strings - get ppe statistics strings
+ * @ppe_cb: ppe control block
+ * @stringset: string set type
+ * @data: output string
+ */
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data)
+{
+	char *buff = (char *)data;
+	int index = ppe_cb->index;
+
+	snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_sw_pkt", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_ok", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_drop_pkt_no_bd", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_fail", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_wait", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_drop_no_buf", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_err_fifo_full", index);
+	buff = buff + ETH_GSTRING_LEN;
+
+	snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_bd", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_ok", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_fifo_empty", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_csum_fail", index);
+}
+
+void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
+{
+	u64 *regs_buff = data;
+	struct hns_ppe_hw_stats *hw_stats = &ppe_cb->hw_stats;
+
+	regs_buff[0] = hw_stats->rx_pkts_from_sw;
+	regs_buff[1] = hw_stats->rx_pkts;
+	regs_buff[2] = hw_stats->rx_drop_no_bd;
+	regs_buff[3] = hw_stats->rx_alloc_buf_fail;
+	regs_buff[4] = hw_stats->rx_alloc_buf_wait;
+	regs_buff[5] = hw_stats->rx_drop_no_buf;
+	regs_buff[6] = hw_stats->rx_err_fifo_full;
+
+	regs_buff[7] = hw_stats->tx_bd_form_rcb;
+	regs_buff[8] = hw_stats->tx_pkts_from_rcb;
+	regs_buff[9] = hw_stats->tx_pkts;
+	regs_buff[10] = hw_stats->tx_err_fifo_empty;
+	regs_buff[11] = hw_stats->tx_err_checksum;
+}
+
+/**
+ * hns_ppe_init - init ppe device
+ * @dsaf_dev: dsaf device
+ * return 0 on success, negative on failure
+ */
+int hns_ppe_init(struct dsaf_device *dsaf_dev)
+{
+	int i, k;
+	int ret;
+
+	for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+		ret = hns_ppe_common_get_cfg(dsaf_dev, i);
+		if (ret)
+			goto get_ppe_cfg_fail;
+
+		ret = hns_rcb_common_get_cfg(dsaf_dev, i);
+		if (ret)
+			goto get_rcb_cfg_fail;
+
+		hns_ppe_get_cfg(dsaf_dev->ppe_common[i]);
+
+		hns_rcb_get_cfg(dsaf_dev->rcb_common[i]);
+	}
+
+	for (i = 0; i < HNS_PPE_COM_NUM; i++)
+		hns_ppe_reset_common(dsaf_dev, i);
+
+	return 0;
+
+get_rcb_cfg_fail:
+	hns_ppe_common_free_cfg(dsaf_dev, i);
+get_ppe_cfg_fail:
+	for (k = i - 1; k >= 0; k--) {
+		hns_rcb_common_free_cfg(dsaf_dev, k);
+		hns_ppe_common_free_cfg(dsaf_dev, k);
+	}
+	return ret;
+}
+
+void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data)
+{
+	struct ppe_common_cb *ppe_common = ppe_cb->ppe_common_cb;
+	u32 *regs = data;
+	u32 i;
+	u32 offset;
+
+	/* ppe common registers */
+	regs[0] = dsaf_read_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG);
+	regs[1] = dsaf_read_dev(ppe_common, PPE_COM_INTEN_REG);
+	regs[2] = dsaf_read_dev(ppe_common, PPE_COM_RINT_REG);
+	regs[3] = dsaf_read_dev(ppe_common, PPE_COM_INTSTS_REG);
+	regs[4] = dsaf_read_dev(ppe_common, PPE_COM_COMMON_CNT_CLR_CE_REG);
+
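+	/* per-queue rx drop, rx ok, tx err and tx ok counters, laid out as
+	 * four consecutive blocks of DSAF_TOTAL_QUEUE_NUM entries each,
+	 * starting at regs[5]
+	 */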
+	for (i = 0; i < DSAF_TOTAL_QUEUE_NUM; i++) {
+		offset = PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 0x4 * i;
+		regs[5 + i] = dsaf_read_dev(ppe_common, offset);
+		offset = PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 0x4 * i;
+		regs[5 + i + DSAF_TOTAL_QUEUE_NUM]
+				= dsaf_read_dev(ppe_common, offset);
+		offset = PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 0x4 * i;
+		regs[5 + i + DSAF_TOTAL_QUEUE_NUM * 2]
+				= dsaf_read_dev(ppe_common, offset);
+		offset = PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 0x4 * i;
+		regs[5 + i + DSAF_TOTAL_QUEUE_NUM * 3]
+				= dsaf_read_dev(ppe_common, offset);
+	}
+
+	/* mark end of ppe regs */
+	for (i = 521; i < 524; i++)
+		regs[i] = 0xeeeeeeee;
+
+	/* ppe channel registers */
+	regs[525] = dsaf_read_dev(ppe_cb, PPE_CFG_TX_FIFO_THRSLD_REG);
+	regs[526] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_THRSLD_REG);
+	regs[527] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_PAUSE_THRSLD_REG);
+	regs[528] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_SW_BP_THRSLD_REG);
+	regs[529] = dsaf_read_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG);
+	regs[530] = dsaf_read_dev(ppe_cb, PPE_CFG_BUS_CTRL_REG);
+	regs[531] = dsaf_read_dev(ppe_cb, PPE_CFG_TNL_TO_BE_RST_REG);
+	regs[532] = dsaf_read_dev(ppe_cb, PPE_CURR_TNL_CAN_RST_REG);
+
+	regs[533] = dsaf_read_dev(ppe_cb, PPE_CFG_XGE_MODE_REG);
+	regs[534] = dsaf_read_dev(ppe_cb, PPE_CFG_MAX_FRAME_LEN_REG);
+	regs[535] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_PKT_MODE_REG);
+	regs[536] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_VLAN_TAG_REG);
+	regs[537] = dsaf_read_dev(ppe_cb, PPE_CFG_TAG_GEN_REG);
+	regs[538] = dsaf_read_dev(ppe_cb, PPE_CFG_PARSE_TAG_REG);
+	regs[539] = dsaf_read_dev(ppe_cb, PPE_CFG_PRO_CHECK_EN_REG);
+
+	regs[540] = dsaf_read_dev(ppe_cb, PPE_INTEN_REG);
+	regs[541] = dsaf_read_dev(ppe_cb, PPE_RINT_REG);
+	regs[542] = dsaf_read_dev(ppe_cb, PPE_INTSTS_REG);
+	regs[543] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_PKT_INT_REG);
+
+	regs[544] = dsaf_read_dev(ppe_cb, PPE_CFG_HEAT_DECT_TIME0_REG);
+	regs[545] = dsaf_read_dev(ppe_cb, PPE_CFG_HEAT_DECT_TIME1_REG);
+
+	/* ppe static */
+	regs[546] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_SW_PKT_CNT_REG);
+	regs[547] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG);
+	regs[548] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_NO_BUF_CNT_REG);
+	regs[549] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_BD_CNT_REG);
+	regs[550] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CNT_REG);
+	regs[551] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_OK_CNT_REG);
+	regs[552] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_EPT_CNT_REG);
+	regs[553] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CS_FAIL_CNT_REG);
+	regs[554] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_FAIL_CNT_REG);
+	regs[555] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_WAIT_CNT_REG);
+	regs[556] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_FUL_CNT_REG);
+	regs[557] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_PRT_CNT_REG);
+
+	regs[558] = dsaf_read_dev(ppe_cb, PPE_TNL_0_5_CNT_CLR_CE_REG);
+	regs[559] = dsaf_read_dev(ppe_cb, PPE_CFG_AXI_DBG_REG);
+	regs[560] = dsaf_read_dev(ppe_cb, PPE_HIS_PRO_ERR_REG);
+	regs[561] = dsaf_read_dev(ppe_cb, PPE_HIS_TNL_FIFO_ERR_REG);
+	regs[562] = dsaf_read_dev(ppe_cb, PPE_CURR_CFF_DATA_NUM_REG);
+	regs[563] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_ST_REG);
+	regs[564] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_ST_REG);
+	regs[565] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_FIFO0_REG);
+	regs[566] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_FIFO1_REG);
+	regs[567] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG);
+	regs[568] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO1_REG);
+	regs[569] = dsaf_read_dev(ppe_cb, PPE_ECO0_REG);
+	regs[570] = dsaf_read_dev(ppe_cb, PPE_ECO1_REG);
+	regs[571] = dsaf_read_dev(ppe_cb, PPE_ECO2_REG);
+
+	/* mark end of ppe regs */
+	for (i = 572; i < 576; i++)
+		regs[i] = 0xeeeeeeee;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
new file mode 100644
index 0000000..4894f9a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_PPE_H
+#define _HNS_DSAF_PPE_H
+
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_rcb.h"
+
+#define HNS_PPE_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN
+#define HNS_PPE_DEBUG_NW_ENGINE_NUM 1
+#define HNS_PPE_COM_NUM DSAF_COMM_DEV_NUM
+
+#define PPE_COMMON_REG_OFFSET 0x70000
+#define PPE_REG_OFFSET 0x10000
+
+#define ETH_PPE_DUMP_NUM 576
+#define ETH_PPE_STATIC_NUM 12
+enum ppe_qid_mode {
+	PPE_QID_MODE0 = 0,	/* fixed queue id mode */
+	PPE_QID_MODE1,		/* switch:128VM non switch:6Port/4VM/4TC */
+	PPE_QID_MODE2,		/* switch:32VM/4TC non switch:6Port/16VM */
+	PPE_QID_MODE3,		/* switch:4TC/8TAG non switch:2Port/64VM */
+	PPE_QID_MODE4,		/* switch:8VM/16TAG non switch:2Port/16VM/4TC */
+	PPE_QID_MODE5,		/* non switch:6Port/16TAG */
+	PPE_QID_MODE6,		/* non switch:6Port/2VM/8TC */
+	PPE_QID_MODE7,		/* non switch:2Port/8VM/8TC */
+};
+
+enum ppe_port_mode {
+	PPE_MODE_GE = 0,
+	PPE_MODE_XGE,
+};
+
+enum ppe_common_mode {
+	PPE_COMMON_MODE_DEBUG = 0,
+	PPE_COMMON_MODE_SERVICE,
+	PPE_COMMON_MODE_MAX
+};
+
+struct hns_ppe_hw_stats {
+	u64 rx_pkts_from_sw;
+	u64 rx_pkts;
+	u64 rx_drop_no_bd;
+	u64 rx_alloc_buf_fail;
+	u64 rx_alloc_buf_wait;
+	u64 rx_drop_no_buf;
+	u64 rx_err_fifo_full;
+	u64 tx_bd_form_rcb;
+	u64 tx_pkts_from_rcb;
+	u64 tx_pkts;
+	u64 tx_err_fifo_empty;
+	u64 tx_err_checksum;
+};
+
+struct hns_ppe_cb {
+	struct device *dev;
+	struct hns_ppe_cb *next;	/* pointer to next ppe device */
+	struct ppe_common_cb *ppe_common_cb; /* belong to */
+	struct hns_ppe_hw_stats hw_stats;
+
+	u8 index;	/* index in a ppe common device */
+	u8 port;			 /* port id in dsaf  */
+	void __iomem *io_base;
+	int virq;
+};
+
+struct ppe_common_cb {
+	struct device *dev;
+	struct dsaf_device *dsaf_dev;
+	void __iomem *io_base;
+
+	enum ppe_common_mode ppe_mode;
+
+	u8 comm_index;   /*ppe_common index*/
+
+	u32 ppe_num;
+	struct hns_ppe_cb ppe_cb[0];
+
+};
+
+int hns_ppe_init(struct dsaf_device *dsaf_dev);
+
+void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
+
+void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index);
+
+void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb);
+
+int hns_ppe_get_sset_count(int stringset);
+int hns_ppe_get_regs_count(void);
+void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data);
+
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data);
+void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data);
+#endif /* _HNS_DSAF_PPE_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
new file mode 100644
index 0000000..05ea244
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -0,0 +1,1021 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/cacheflush.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+
+#define RCB_COMMON_REG_OFFSET 0x80000
+#define TX_RING 0
+#define RX_RING 1
+
+#define RCB_RESET_WAIT_TIMES 30
+#define RCB_RESET_TRY_TIMES 10
+
+/**
+ *hns_rcb_wait_fbd_clean - wait until the rings' fbd is clean
+ *@qs: ring struct pointer array
+ *@q_num: number of array entries
+ *@flag: tx or rx flag
+ */
+void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
+{
+	int i, wait_cnt;
+	u32 fbd_num;
+
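+
+	/* poll every queue until its pending fbd count drops to zero,
+	 * giving up after 10000 polls in total
+	 */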
+	for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
+		usleep_range(200, 300);
+		fbd_num = 0;
+		if (flag & RCB_INT_FLAG_TX)
+			fbd_num += dsaf_read_dev(qs[i],
+						 RCB_RING_TX_RING_FBDNUM_REG);
+		if (flag & RCB_INT_FLAG_RX)
+			fbd_num += dsaf_read_dev(qs[i],
+						 RCB_RING_RX_RING_FBDNUM_REG);
+		if (!fbd_num)
+			i++;
+		if (wait_cnt >= 10000)
+			break;
+	}
+
+	if (i < q_num)
+		dev_err(qs[i]->handle->owner_dev,
+			"queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
+}
+
+/**
+ *hns_rcb_reset_ring_hw - ring reset
+ *@q: ring struct pointer
+ */
+void hns_rcb_reset_ring_hw(struct hnae_queue *q)
+{
+	u32 wait_cnt;
+	u32 try_cnt = 0;
+	u32 could_ret;
+
+	u32 tx_fbd_num;
+
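+	/* wait for the tx fbd count to drain, request the ring reset and
+	 * poll RCB_RING_COULD_BE_RST, re-issuing the request until the ring
+	 * reports it can be reset or the retry limit is reached
+	 */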
+	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
+		usleep_range(100, 200);
+		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
+		if (tx_fbd_num)
+			continue;
+
+		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);
+
+		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
+
+		msleep(20);
+		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
+
+		wait_cnt = 0;
+		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
+			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
+
+			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
+
+			msleep(20);
+			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
+
+			wait_cnt++;
+		}
+
+		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
+
+		if (could_ret)
+			break;
+	}
+
+	if (try_cnt >= RCB_RESET_TRY_TIMES)
+		dev_err(q->dev->dev, "port%d reset ring fail\n",
+			hns_ae_get_vf_cb(q->handle)->port_index);
+}
+
+/**
+ *hns_rcb_int_ctrl_hw - rcb irq enable control
+ *@q: hnae queue struct pointer
+ *@flag:ring flag tx or rx
+ *@mask:mask
+ */
+void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
+{
+	u32 int_mask_en = !!mask;
+
+	if (flag & RCB_INT_FLAG_TX) {
+		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
+		dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
+			       int_mask_en);
+	}
+
+	if (flag & RCB_INT_FLAG_RX) {
+		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
+		dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
+			       int_mask_en);
+	}
+}
+
+void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
+{
+	u32 clr = 1;
+
+	if (flag & RCB_INT_FLAG_TX) {
+		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, clr);
+		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, clr);
+	}
+
+	if (flag & RCB_INT_FLAG_RX) {
+		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, clr);
+		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, clr);
+	}
+}
+
+/**
+ *hns_rcb_ring_enable_hw - enable ring
+ *@q: hnae queue struct pointer
+ *@val: enable (nonzero) or disable (0)
+ */
+void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
+{
+	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
+}
+
+void hns_rcb_start(struct hnae_queue *q, u32 val)
+{
+	hns_rcb_ring_enable_hw(q, val);
+}
+
+/**
+ *hns_rcb_common_init_commit_hw - make rcb common init completed
+ *@rcb_common: rcb common device
+ */
+void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
+{
+	wmb();	/* make sure the config writes above are visible first */
+	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
+	wmb();	/* make sure the finish flag write is issued before returning */
+}
+
+/**
+ *hns_rcb_ring_init - init rcb ring
+ *@ring_pair: ring pair control block
+ *@ring_type: ring type, RX_RING or TX_RING
+ */
+static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
+{
+	struct hnae_queue *q = &ring_pair->q;
+	struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
+	u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
+	struct hnae_ring *ring =
+		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
+	dma_addr_t dma = ring->desc_dma_addr;
+
+	if (ring_type == RX_RING) {
+		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
+			       (u32)dma);
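+		/* upper 32 bits of the descriptor dma address; the two-step
+		 * shift avoids an out-of-range shift when dma_addr_t is only
+		 * 32 bits wide
+		 */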
+		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
+			       (u32)((dma >> 31) >> 1));
+		dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
+			       bd_size_type);
+		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
+			       ring_pair->port_id_in_dsa);
+		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
+			       ring_pair->port_id_in_dsa);
+	} else {
+		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
+			       (u32)dma);
+		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
+			       (u32)((dma >> 31) >> 1));
+		dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
+			       bd_size_type);
+		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
+			       ring_pair->port_id_in_dsa);
+		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
+			       ring_pair->port_id_in_dsa);
+	}
+}
+
+/**
+ *hns_rcb_init_hw - init rcb hardware
+ *@ring: rcb ring
+ */
+void hns_rcb_init_hw(struct ring_pair_cb *ring)
+{
+	hns_rcb_ring_init(ring, RX_RING);
+	hns_rcb_ring_init(ring, TX_RING);
+}
+
+/**
+ *hns_rcb_set_port_desc_cnt - set rcb port descriptor num
+ *@rcb_common: rcb_common device
+ *@port_idx:port index
+ *@desc_cnt:BD num
+ */
+static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
+				      u32 port_idx, u32 desc_cnt)
+{
+	if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
+		port_idx = 0;
+
+	dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
+		       desc_cnt);
+}
+
+/**
+ *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port index
+ *@coalesced_frames:BD num for coalesced frames
+ */
+static int  hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common,
+					      u32 port_idx,
+					      u32 coalesced_frames)
+{
+	if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
+		port_idx = 0;
+	if (coalesced_frames >= rcb_common->desc_num ||
+	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES)
+		return -EINVAL;
+
+	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
+		       coalesced_frames);
+	return 0;
+}
+
+/**
+ *hns_rcb_get_port_coalesced_frames - get rcb port coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port index
+ * return coalesced frames value
+ */
+static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common,
+					     u32 port_idx)
+{
+	if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
+		port_idx = 0;
+
+	return dsaf_read_dev(rcb_common,
+			     RCB_CFG_PKTLINE_REG + port_idx * 4);
+}
+
+/**
+ *hns_rcb_set_timeout - set rcb port coalesced time_out
+ *@rcb_common: rcb_common device
+ *@timeout: coalescing timeout value
+ */
+static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
+				u32 timeout)
+{
+	dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
+}
+
+static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
+{
+	if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+		return HNS_RCB_SERVICE_NW_ENGINE_NUM;
+	else
+		return HNS_RCB_DEBUG_NW_ENGINE_NUM;
+}
+
+/* clear rcb comm exception irq */
+static void hns_rcb_comm_exc_irq_en(
+			struct rcb_common_cb *rcb_common, int en)
+{
+	u32 clr_vlue = 0xfffffffful;
+	u32 msk_vlue = en ? 0 : 0xfffffffful;
+
+	/* clr int*/
+	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue);
+
+	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue);
+
+	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue);
+
+	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue);
+	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue);
+
+	/*en msk*/
+	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue);
+
+	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue);
+
+	/* tx bd does not need the cacheline, so mask
+	 * sf_txring_fbd_intmask (bit 1)
+	 */
+	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2);
+
+	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue);
+	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue);
+}
+
+/**
+ *hns_rcb_common_init_hw - init rcb common hardware
+ *@rcb_common: rcb_common device
+ *return 0 on success, negative on failure
+ */
+int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
+{
+	u32 reg_val;
+	int i;
+	int port_num = hns_rcb_common_get_port_num(rcb_common);
+
+	hns_rcb_comm_exc_irq_en(rcb_common, 0);
+
+	reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
+	if (0x1 != (reg_val & 0x1)) {
+		dev_err(rcb_common->dsaf_dev->dev,
+			"RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
+		return -EBUSY;
+	}
+
+	for (i = 0; i < port_num; i++) {
+		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
+		(void)hns_rcb_set_port_coalesced_frames(
+			rcb_common, i, rcb_common->coalesced_frames);
+	}
+	hns_rcb_set_timeout(rcb_common, rcb_common->timeout);
+
+	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
+		       HNS_RCB_COMMON_ENDIAN);
+
+	return 0;
+}
+
+int hns_rcb_buf_size2type(u32 buf_size)
+{
+	int bd_size_type;
+
+	switch (buf_size) {
+	case 512:
+		bd_size_type = HNS_BD_SIZE_512_TYPE;
+		break;
+	case 1024:
+		bd_size_type = HNS_BD_SIZE_1024_TYPE;
+		break;
+	case 2048:
+		bd_size_type = HNS_BD_SIZE_2048_TYPE;
+		break;
+	case 4096:
+		bd_size_type = HNS_BD_SIZE_4096_TYPE;
+		break;
+	default:
+		bd_size_type = -EINVAL;
+	}
+
+	return bd_size_type;
+}
+
+static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
+{
+	struct hnae_ring *ring;
+	struct rcb_common_cb *rcb_common;
+	struct ring_pair_cb *ring_pair_cb;
+	u32 buf_size;
+	u16 desc_num;
+	int irq_idx;
+
+	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
+	if (ring_type == RX_RING) {
+		ring = &q->rx_ring;
+		ring->io_base = ring_pair_cb->q.io_base;
+		irq_idx = HNS_RCB_IRQ_IDX_RX;
+	} else {
+		ring = &q->tx_ring;
+		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+			HNS_RCB_TX_REG_OFFSET;
+		irq_idx = HNS_RCB_IRQ_IDX_TX;
+	}
+
+	rcb_common = ring_pair_cb->rcb_common;
+	buf_size = rcb_common->dsaf_dev->buf_size;
+	desc_num = rcb_common->dsaf_dev->desc_num;
+
+	ring->desc = NULL;
+	ring->desc_cb = NULL;
+
+	ring->irq = ring_pair_cb->virq[irq_idx];
+	ring->desc_dma_addr = 0;
+
+	ring->buf_size = buf_size;
+	ring->desc_num = desc_num;
+	ring->max_desc_num_per_pkt = HNS_RCB_RING_MAX_BD_PER_PKT;
+	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
+	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
+	ring->next_to_use = 0;
+	ring->next_to_clean = 0;
+}
+
+static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
+{
+	ring_pair_cb->q.handle = NULL;
+
+	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
+	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
+}
+
+static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
+{
+	int comm_index = rcb_common->comm_index;
+	int port;
+	int q_num;
+
+	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+		q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
+		port = ring_idx / q_num;
+	} else {
+		port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1;
+	}
+
+	return port;
+}
+
+static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
+{
+	int comm_index = rcb_common->comm_index;
+
+	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+		return HNS_SERVICE_RING_IRQ_IDX;
+	else
+		return HNS_DEBUG_RING_IRQ_IDX + (comm_index - 1) * 2;
+}
+
+#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
+	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
+/**
+ *hns_rcb_get_cfg - get rcb config
+ *@rcb_common: rcb common device
+ */
+void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
+{
+	struct ring_pair_cb *ring_pair_cb;
+	u32 i;
+	u32 ring_num = rcb_common->ring_num;
+	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
+	struct device_node *np = rcb_common->dsaf_dev->dev->of_node;
+
+	for (i = 0; i < ring_num; i++) {
+		ring_pair_cb = &rcb_common->ring_pair_cb[i];
+		ring_pair_cb->rcb_common = rcb_common;
+		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
+		ring_pair_cb->index = i;
+		ring_pair_cb->q.io_base =
+			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
+		ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i);
+		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX]
+			= irq_of_parse_and_map(np, base_irq_idx + i * 2);
+		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX]
+			= irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1);
+		ring_pair_cb->q.phy_base =
+			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
+		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
+	}
+}
+
+/**
+ *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
+ *@dsaf_dev: dsaf device
+ *@port: port index
+ *return coalesced_frames
+ */
+u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port)
+{
+	int comm_index =  hns_dsaf_get_comm_idx_by_port(port);
+	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+
+	return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
+}
+
+/**
+ *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
+ *@dsaf_dev: dsaf device
+ *@comm_index: comm index
+ *return time_out
+ */
+u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index)
+{
+	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+
+	return rcb_comm->timeout;
+}
+
+/**
+ *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
+ *@dsaf_dev: dsaf device
+ *@port: port index
+ *@timeout: coalescing timeout value
+ */
+void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
+				int port, u32 timeout)
+{
+	int comm_index =  hns_dsaf_get_comm_idx_by_port(port);
+	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+
+	if (rcb_comm->timeout == timeout)
+		return;
+
+	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+		dev_err(dsaf_dev->dev,
+			"error: not support coalesce_usecs setting!\n");
+		return;
+	}
+	rcb_comm->timeout = timeout;
+	hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout);
+}
+
+/**
+ *hns_rcb_set_coalesced_frames - set rcb coalesced frames
+ *@dsaf_dev: dsaf device
+ *@port: port index
+ *@coalesced_frames: BD num for coalesced frames
+ *Return 0 on success, negative on failure
+ */
+int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
+				 int port, u32 coalesced_frames)
+{
+	int comm_index =  hns_dsaf_get_comm_idx_by_port(port);
+	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+	u32 coalesced_reg_val;
+	int ret;
+
+	coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port);
+
+	if (coalesced_reg_val == coalesced_frames)
+		return 0;
+
+	if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) {
+		ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port,
+							coalesced_frames);
+		return ret;
+	} else {
+		return -EINVAL;
+	}
+}
+
+/**
+ *hns_rcb_get_queue_mode - get max VM number and max ring number per VM
+ *						according to dsaf mode
+ *@dsaf_mode: dsaf mode
+ *@comm_index: comm index
+ *@max_vfn : max vfn number
+ *@max_q_per_vf:max ring number per vm
+ */
+static void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+				   u16 *max_vfn, u16 *max_q_per_vf)
+{
+	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+		switch (dsaf_mode) {
+		case DSAF_MODE_DISABLE_6PORT_0VM:
+			*max_vfn = 1;
+			*max_q_per_vf = 16;
+			break;
+		case DSAF_MODE_DISABLE_FIX:
+			*max_vfn = 1;
+			*max_q_per_vf = 1;
+			break;
+		case DSAF_MODE_DISABLE_2PORT_64VM:
+			*max_vfn = 64;
+			*max_q_per_vf = 1;
+			break;
+		case DSAF_MODE_DISABLE_6PORT_16VM:
+			*max_vfn = 16;
+			*max_q_per_vf = 1;
+			break;
+		default:
+			*max_vfn = 1;
+			*max_q_per_vf = 16;
+			break;
+		}
+	} else {
+		*max_vfn = 1;
+		*max_q_per_vf = 1;
+	}
+}
+
+int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index)
+{
+	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+		switch (dsaf_dev->dsaf_mode) {
+		case DSAF_MODE_ENABLE_FIX:
+			return 1;
+
+		case DSAF_MODE_DISABLE_FIX:
+			return 6;
+
+		case DSAF_MODE_ENABLE_0VM:
+			return 32;
+
+		case DSAF_MODE_DISABLE_6PORT_0VM:
+		case DSAF_MODE_ENABLE_16VM:
+		case DSAF_MODE_DISABLE_6PORT_2VM:
+		case DSAF_MODE_DISABLE_6PORT_16VM:
+		case DSAF_MODE_DISABLE_6PORT_4VM:
+		case DSAF_MODE_ENABLE_8VM:
+			return 96;
+
+		case DSAF_MODE_DISABLE_2PORT_16VM:
+		case DSAF_MODE_DISABLE_2PORT_8VM:
+		case DSAF_MODE_ENABLE_32VM:
+		case DSAF_MODE_DISABLE_2PORT_64VM:
+		case DSAF_MODE_ENABLE_128VM:
+			return 128;
+
+		default:
+			dev_warn(dsaf_dev->dev,
+				 "get ring num fail,use default!dsaf_mode=%d\n",
+				 dsaf_dev->dsaf_mode);
+			return 128;
+		}
+	} else {
+		return 1;
+	}
+}
+
+void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev,
+				       int comm_index)
+{
+	void __iomem *base_addr;
+
+	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+		base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
+	else
+		base_addr = dsaf_dev->sds_base
+			+ (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
+			+ RCB_COMMON_REG_OFFSET;
+
+	return base_addr;
+}
+
+static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev,
+					    int comm_index)
+{
+	struct device_node *np = dsaf_dev->dev->of_node;
+	phys_addr_t phy_addr;
+	const __be32 *tmp_addr;
+	u64 addr_offset = 0;
+	u64 size = 0;
+	int index = 0;
+
+	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+		index    = 2;
+		addr_offset = RCB_COMMON_REG_OFFSET;
+	} else {
+		index    = 1;
+		addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET +
+				RCB_COMMON_REG_OFFSET;
+	}
+	tmp_addr  = of_get_address(np, index, &size, NULL);
+	phy_addr  = of_translate_address(np, tmp_addr);
+	return phy_addr + addr_offset;
+}
+
+int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
+			   int comm_index)
+{
+	struct rcb_common_cb *rcb_common;
+	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
+	u16 max_vfn;
+	u16 max_q_per_vf;
+	int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index);
+
+	rcb_common =
+		devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
+			ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
+	if (!rcb_common) {
+		dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
+		return -ENOMEM;
+	}
+	rcb_common->comm_index = comm_index;
+	rcb_common->ring_num = ring_num;
+	rcb_common->dsaf_dev = dsaf_dev;
+
+	rcb_common->desc_num = dsaf_dev->desc_num;
+	rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES;
+	rcb_common->timeout = HNS_RCB_MAX_TIME_OUT;
+
+	hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
+	rcb_common->max_vfn = max_vfn;
+	rcb_common->max_q_per_vf = max_q_per_vf;
+
+	rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index);
+	rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index);
+
+	dsaf_dev->rcb_common[comm_index] = rcb_common;
+	return 0;
+}
+
+void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
+			     u32 comm_index)
+{
+	dsaf_dev->rcb_common[comm_index] = NULL;
+}
+
+void hns_rcb_update_stats(struct hnae_queue *queue)
+{
+	struct ring_pair_cb *ring =
+		container_of(queue, struct ring_pair_cb, q);
+	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
+	struct ppe_common_cb *ppe_common
+		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
+	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
+
+	hw_stats->rx_pkts += dsaf_read_dev(queue,
+			 RCB_RING_RX_RING_PKTNUM_RECORD_REG);
+	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);
+
+	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
+			 PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
+	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
+			 PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);
+
+	hw_stats->tx_pkts += dsaf_read_dev(queue,
+			 RCB_RING_TX_RING_PKTNUM_RECORD_REG);
+	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);
+
+	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
+			 PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
+	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
+			 PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
+}
+
+/**
+ *hns_rcb_get_stats - get rcb statistic
+ *@ring: rcb ring
+ *@data:statistic value
+ */
+void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
+{
+	u64 *regs_buff = data;
+	struct ring_pair_cb *ring =
+		container_of(queue, struct ring_pair_cb, q);
+	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
+
+	regs_buff[0] = hw_stats->tx_pkts;
+	regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
+	regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
+	regs_buff[3] =
+		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
+
+	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
+	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
+	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
+	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
+	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
+	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
+	regs_buff[10] = queue->tx_ring.stats.restart_queue;
+	regs_buff[11] = queue->tx_ring.stats.tx_busy;
+
+	regs_buff[12] = hw_stats->rx_pkts;
+	regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
+	regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
+	regs_buff[15] =
+		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
+
+	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
+	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
+	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
+	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
+	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
+	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
+	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
+	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
+	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
+	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
+	regs_buff[26] = queue->rx_ring.stats.l2_err;
+	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
+}
+
+/**
+ *hns_rcb_get_ring_sset_count - rcb string set count
+ *@stringset:ethtool cmd
+ *return rcb ring string set count
+ */
+int hns_rcb_get_ring_sset_count(int stringset)
+{
+	if (stringset == ETH_SS_STATS)
+		return HNS_RING_STATIC_REG_NUM;
+
+	return 0;
+}
+
+/**
+ *hns_rcb_get_common_regs_count - rcb common regs count
+ *return regs count
+ */
+int hns_rcb_get_common_regs_count(void)
+{
+	return HNS_RCB_COMMON_DUMP_REG_NUM;
+}
+
+/**
+ *hns_rcb_get_ring_regs_count - rcb ring regs count
+ *return regs count
+ */
+int hns_rcb_get_ring_regs_count(void)
+{
+	return HNS_RCB_RING_DUMP_REG_NUM;
+}
+
+/**
+ *hns_rcb_get_strings - get rcb string set
+ *@stringset:string set index
+ *@data:strings name value
+ *@index:queue index
+ */
+void hns_rcb_get_strings(int stringset, u8 *data, int index)
+{
+	char *buff = (char *)data;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
+	buff = buff + ETH_GSTRING_LEN;
+
+	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
+	buff = buff + ETH_GSTRING_LEN;
+
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
+	buff = buff + ETH_GSTRING_LEN;
+
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
+	buff = buff + ETH_GSTRING_LEN;
+	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
+}
+
+void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
+{
+	u32 *regs = data;
+	u32 i = 0;
+
+	/*rcb common registers */
+	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
+	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
+	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);
+
+	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
+	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
+	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
+	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
+	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
+	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);
+
+	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
+	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
+	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
+	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
+	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
+	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
+	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
+	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
+	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
+	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
+	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
+	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
+	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
+	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
+	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
+	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
+	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
+	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
+	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);
+
+	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
+	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
+	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
+	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
+	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
+	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
+	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
+	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
+	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
+	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);
+
+	/* rcb common entry registers */
+	for (i = 0; i < 16; i++) { /* total 16 model registers */
+		regs[38 + i]
+			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
+		regs[54 + i]
+			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
+	}
+
+	regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG);
+	regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
+	regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
+
+	/* mark end of rcb common regs */
+	for (i = 73; i < 80; i++)
+		regs[i] = 0xcccccccc;
+}
+
+void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
+{
+	u32 *regs = data;
+	struct ring_pair_cb *ring_pair
+		= container_of(queue, struct ring_pair_cb, q);
+	u32 i = 0;
+
+	/* rcb ring registers */
+	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
+	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
+	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
+	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
+	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
+	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
+	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
+	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
+	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);
+
+	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
+	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
+	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
+	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
+	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
+	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
+	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
+	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
+	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
+	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);
+
+	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
+	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
+	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
+	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
+	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
+	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
+	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);
+
+	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
+	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
+	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
+	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
+	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
+	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
+	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
+	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);
+
+	/* mark end of ring regs */
+	for (i = 35; i < 40; i++)
+		regs[i] = 0xcccccc00 + ring_pair->index;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
new file mode 100644
index 0000000..c7db613
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_RCB_H
+#define _HNS_DSAF_RCB_H
+
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "hnae.h"
+#include "hns_dsaf_main.h"
+
+struct rcb_common_cb;
+
+#define HNS_RCB_IRQ_NUM_PER_QUEUE		2
+#define HNS_RCB_IRQ_IDX_TX			0
+#define HNS_RCB_IRQ_IDX_RX			1
+#define HNS_RCB_TX_REG_OFFSET			0x40
+
+#define HNS_RCB_SERVICE_NW_ENGINE_NUM		DSAF_COMM_CHN
+#define HNS_RCB_DEBUG_NW_ENGINE_NUM		1
+#define HNS_RCB_RING_MAX_BD_PER_PKT		3
+#define HNS_RCB_MAX_PKT_SIZE MAC_MAX_MTU
+
+#define HNS_RCB_RING_MAX_PENDING_BD		1024
+#define HNS_RCB_RING_MIN_PENDING_BD		16
+
+#define HNS_RCB_REG_OFFSET			0x10000
+
+#define HNS_RCB_MAX_COALESCED_FRAMES		1023
+#define HNS_RCB_MIN_COALESCED_FRAMES		1
+#define HNS_RCB_DEF_COALESCED_FRAMES		50
+#define HNS_RCB_MAX_TIME_OUT			0x500
+
+#define HNS_RCB_COMMON_ENDIAN			1
+
+#define HNS_BD_SIZE_512_TYPE			0
+#define HNS_BD_SIZE_1024_TYPE			1
+#define HNS_BD_SIZE_2048_TYPE			2
+#define HNS_BD_SIZE_4096_TYPE			3
+
+#define HNS_RCB_COMMON_DUMP_REG_NUM 80
+#define HNS_RCB_RING_DUMP_REG_NUM 40
+#define HNS_RING_STATIC_REG_NUM 28
+
+#define HNS_DUMP_REG_NUM			500
+#define HNS_STATIC_REG_NUM			12
+
+enum rcb_int_flag {
+	RCB_INT_FLAG_TX = 0x1,
+	RCB_INT_FLAG_RX = (0x1 << 1),
+	RCB_INT_FLAG_MAX = (0x1 << 2),	/* must be the last element */
+};
+
+struct hns_ring_hw_stats {
+	u64 tx_pkts;
+	u64 ppe_tx_ok_pkts;
+	u64 ppe_tx_drop_pkts;
+	u64 rx_pkts;
+	u64 ppe_rx_ok_pkts;
+	u64 ppe_rx_drop_pkts;
+};
+
+struct ring_pair_cb {
+	struct rcb_common_cb *rcb_common;	/* rcb common device this ring pair belongs to */
+	struct device *dev;	/* device for DMA mapping */
+	struct hnae_queue q;
+
+	u16 index;	/* global index in a rcb common device */
+	u16 buf_size;
+
+	int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
+
+	u8 port_id_in_dsa;
+	u8 used_by_vf;
+
+	struct hns_ring_hw_stats hw_stats;
+};
+
+struct rcb_common_cb {
+	u8 __iomem *io_base;
+	phys_addr_t phy_base;
+	struct dsaf_device *dsaf_dev;
+	u16 max_vfn;
+	u16 max_q_per_vf;
+
+	u8 comm_index;
+	u32 ring_num;
+	u32 coalesced_frames; /* frames threshold of rx interrupt */
+	u32 timeout; /* time threshold of rx interrupt */
+	u32 desc_num; /* desc num per queue */
+
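+	/* must be last: one control block per ring pair follows the common block */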
+	struct ring_pair_cb ring_pair_cb[0];
+};
+
+int hns_rcb_buf_size2type(u32 buf_size);
+
+int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
+void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
+int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
+void hns_rcb_start(struct hnae_queue *q, u32 val);
+void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
+void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
+
+void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val);
+void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag);
+void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 enable);
+void hns_rcb_init_hw(struct ring_pair_cb *ring);
+void hns_rcb_reset_ring_hw(struct hnae_queue *q);
+void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
+
+u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int comm_index);
+u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index);
+void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
+				int comm_index, u32 timeout);
+int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
+				 int comm_index, u32 coalesce_frames);
+void hns_rcb_update_stats(struct hnae_queue *queue);
+
+void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
+
+void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_common, void *data);
+
+int hns_rcb_get_ring_sset_count(int stringset);
+int hns_rcb_get_common_regs_count(void);
+int hns_rcb_get_ring_regs_count(void);
+
+void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
+
+void hns_rcb_get_strings(int stringset, u8 *data, int index);
+#endif /* _HNS_DSAF_RCB_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
new file mode 100644
index 0000000..b475e1b
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -0,0 +1,972 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DSAF_REG_H_
+#define _DSAF_REG_H_
+
+#define HNS_GE_FIFO_ERR_INTNUM 8
+#define HNS_XGE_ERR_INTNUM 6
+#define HNS_RCB_COMM_ERR_INTNUM 12
+#define HNS_PPE_TNL_ERR_INTNUM 8
+#define HNS_DSAF_EVENT_INTNUM 21
+#define HNS_DEBUG_RING_INTNUM 4
+#define HNS_SERVICE_RING_INTNUM 256
+
+#define HNS_DEBUG_RING_IRQ_IDX (HNS_GE_FIFO_ERR_INTNUM + HNS_XGE_ERR_INTNUM +\
+		HNS_RCB_COMM_ERR_INTNUM + HNS_PPE_TNL_ERR_INTNUM +\
+		HNS_DSAF_EVENT_INTNUM)
+#define HNS_SERVICE_RING_IRQ_IDX (HNS_DEBUG_RING_IRQ_IDX +\
+		HNS_DEBUG_RING_INTNUM)
+
+#define DSAF_IRQ_NUM 18
+
+#define DSAF_MAX_PORT_NUM_PER_CHIP 8
+#define DSAF_SERVICE_PORT_NUM_PER_DSAF 6
+#define DSAF_MAX_VM_NUM 128
+
+#define DSAF_COMM_DEV_NUM 3
+#define DSAF_PPE_INODE_BASE 6
+#define HNS_DSAF_COMM_SERVICE_NW_IDX 0
+#define DSAF_DEBUG_NW_NUM	2
+#define DSAF_SERVICE_NW_NUM	6
+#define DSAF_COMM_CHN		DSAF_SERVICE_NW_NUM
+#define DSAF_GE_NUM		((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
+#define DSAF_PORT_NUM		((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
+#define DSAF_XGE_NUM		DSAF_SERVICE_NW_NUM
+#define DSAF_NODE_NUM		18
+#define DSAF_XOD_BIG_NUM	DSAF_NODE_NUM
+#define DSAF_SBM_NUM		DSAF_NODE_NUM
+#define DSAF_VOQ_NUM		DSAF_NODE_NUM
+#define DSAF_INODE_NUM		DSAF_NODE_NUM
+#define DSAF_XOD_NUM		8
+#define DSAF_TBL_NUM		8
+#define DSAF_SW_PORT_NUM	8
+#define DSAF_TOTAL_QUEUE_NUM	129
+
+#define DSAF_TCAM_SUM		512
+#define DSAF_LINE_SUM		(2048 * 14)
+
+#define DSAF_SUB_SC_NT_SRAM_CLK_SEL_REG                0x100
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG              0x180
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG              0x184
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL2_REG              0x188
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL3_REG              0x18C
+#define DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG              0x190
+#define DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG              0x194
+#define DSAF_SUB_SC_DSAF_CLK_EN_REG                    0x300
+#define DSAF_SUB_SC_DSAF_CLK_DIS_REG                   0x304
+#define DSAF_SUB_SC_NT_CLK_EN_REG                      0x308
+#define DSAF_SUB_SC_NT_CLK_DIS_REG                     0x30C
+#define DSAF_SUB_SC_XGE_CLK_EN_REG                     0x310
+#define DSAF_SUB_SC_XGE_CLK_DIS_REG                    0x314
+#define DSAF_SUB_SC_GE_CLK_EN_REG                      0x318
+#define DSAF_SUB_SC_GE_CLK_DIS_REG                     0x31C
+#define DSAF_SUB_SC_PPE_CLK_EN_REG                     0x320
+#define DSAF_SUB_SC_PPE_CLK_DIS_REG                    0x324
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_EN_REG             0x350
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_DIS_REG            0x354
+#define DSAF_SUB_SC_XBAR_RESET_REQ_REG                 0xA00
+#define DSAF_SUB_SC_XBAR_RESET_DREQ_REG                0xA04
+#define DSAF_SUB_SC_NT_RESET_REQ_REG                   0xA08
+#define DSAF_SUB_SC_NT_RESET_DREQ_REG                  0xA0C
+#define DSAF_SUB_SC_XGE_RESET_REQ_REG                  0xA10
+#define DSAF_SUB_SC_XGE_RESET_DREQ_REG                 0xA14
+#define DSAF_SUB_SC_GE_RESET_REQ0_REG                  0xA18
+#define DSAF_SUB_SC_GE_RESET_DREQ0_REG                 0xA1C
+#define DSAF_SUB_SC_GE_RESET_REQ1_REG                  0xA20
+#define DSAF_SUB_SC_GE_RESET_DREQ1_REG                 0xA24
+#define DSAF_SUB_SC_PPE_RESET_REQ_REG                  0xA48
+#define DSAF_SUB_SC_PPE_RESET_DREQ_REG                 0xA4C
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG          0xA88
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG         0xA8C
+#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG         0x2060
+#define DSAF_SUB_SC_TCAM_MBIST_EN_REG                  0x2300
+#define DSAF_SUB_SC_DSAF_CLK_ST_REG                    0x5300
+#define DSAF_SUB_SC_NT_CLK_ST_REG                      0x5304
+#define DSAF_SUB_SC_XGE_CLK_ST_REG                     0x5308
+#define DSAF_SUB_SC_GE_CLK_ST_REG                      0x530C
+#define DSAF_SUB_SC_PPE_CLK_ST_REG                     0x5310
+#define DSAF_SUB_SC_ROCEE_CLK_ST_REG                   0x5314
+#define DSAF_SUB_SC_CPU_CLK_ST_REG                     0x5318
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_ST_REG             0x5328
+#define DSAF_SUB_SC_XBAR_RESET_ST_REG                  0x5A00
+#define DSAF_SUB_SC_NT_RESET_ST_REG                    0x5A04
+#define DSAF_SUB_SC_XGE_RESET_ST_REG                   0x5A08
+#define DSAF_SUB_SC_GE_RESET_ST0_REG                   0x5A0C
+#define DSAF_SUB_SC_GE_RESET_ST1_REG                   0x5A10
+#define DSAF_SUB_SC_PPE_RESET_ST_REG                   0x5A24
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_ST_REG           0x5A44
+
+/* serdes offset */
+#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG
+#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG
+#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL
+#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL
+#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL
+#define HNS_MAC_LANE3_CTLEDFE_REG 0x000BFF9CULL
+#define HNS_MAC_LANE0_STATE_REG 0x000BFFD4ULL
+#define HNS_MAC_LANE1_STATE_REG 0x000BFFC4ULL
+#define HNS_MAC_LANE2_STATE_REG 0x000BFFB4ULL
+#define HNS_MAC_LANE3_STATE_REG 0x000BFFA4ULL
+
+#define HILINK_RESET_TIMOUT 10000
+
+#define DSAF_SRAM_INIT_OVER_0_REG	0x0
+#define DSAF_CFG_0_REG			0x4
+#define DSAF_ECC_ERR_INVERT_0_REG	0x8
+#define DSAF_ABNORMAL_TIMEOUT_0_REG	0x1C
+#define DSAF_FSM_TIMEOUT_0_REG		0x20
+#define DSAF_DSA_REG_CNT_CLR_CE_REG	0x2C
+#define DSAF_DSA_SBM_INF_FIFO_THRD_REG	0x30
+#define DSAF_DSA_SRAM_1BIT_ECC_SEL_REG	0x34
+#define DSAF_DSA_SRAM_1BIT_ECC_CNT_REG	0x38
+#define DSAF_PFC_EN_0_REG		0x50
+#define DSAF_PFC_UNIT_CNT_0_REG		0x70
+#define DSAF_XGE_INT_MSK_0_REG		0x100
+#define DSAF_PPE_INT_MSK_0_REG		0x120
+#define DSAF_ROCEE_INT_MSK_0_REG	0x140
+#define DSAF_XGE_INT_SRC_0_REG		0x160
+#define DSAF_PPE_INT_SRC_0_REG		0x180
+#define DSAF_ROCEE_INT_SRC_0_REG	0x1A0
+#define DSAF_XGE_INT_STS_0_REG		0x1C0
+#define DSAF_PPE_INT_STS_0_REG		0x1E0
+#define DSAF_ROCEE_INT_STS_0_REG	0x200
+#define DSAF_PPE_QID_CFG_0_REG		0x300
+#define DSAF_SW_PORT_TYPE_0_REG		0x320
+#define DSAF_STP_PORT_TYPE_0_REG	0x340
+#define DSAF_MIX_DEF_QID_0_REG		0x360
+#define DSAF_PORT_DEF_VLAN_0_REG	0x380
+#define DSAF_VM_DEF_VLAN_0_REG		0x400
+
+#define DSAF_INODE_CUT_THROUGH_CFG_0_REG	0x1000
+#define DSAF_INODE_ECC_INVERT_EN_0_REG		0x1008
+#define DSAF_INODE_ECC_ERR_ADDR_0_REG		0x100C
+#define DSAF_INODE_IN_PORT_NUM_0_REG		0x1018
+#define DSAF_INODE_PRI_TC_CFG_0_REG		0x101C
+#define DSAF_INODE_BP_STATUS_0_REG		0x1020
+#define DSAF_INODE_PAD_DISCARD_NUM_0_REG	0x1028
+#define DSAF_INODE_FINAL_IN_MAN_NUM_0_REG	0x102C
+#define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG	0x1030
+#define DSAF_INODE_SBM_PID_NUM_0_REG		0x1038
+#define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG	0x103C
+#define DSAF_INODE_SBM_RELS_NUM_0_REG		0x104C
+#define DSAF_INODE_SBM_DROP_NUM_0_REG		0x1050
+#define DSAF_INODE_CRC_FALSE_NUM_0_REG		0x1054
+#define DSAF_INODE_BP_DISCARD_NUM_0_REG		0x1058
+#define DSAF_INODE_RSLT_DISCARD_NUM_0_REG	0x105C
+#define DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG	0x1060
+#define DSAF_INODE_VOQ_OVER_NUM_0_REG		0x1068
+#define DSAF_INODE_BD_SAVE_STATUS_0_REG		0x1900
+#define DSAF_INODE_BD_ORDER_STATUS_0_REG	0x1950
+#define DSAF_INODE_SW_VLAN_TAG_DISC_0_REG	0x1A00
+#define DSAF_INODE_IN_DATA_STP_DISC_0_REG	0x1A50
+#define DSAF_INODE_GE_FC_EN_0_REG		0x1B00
+#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG		0x1B50
+#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG		0x1C00
+
+#define DSAF_SBM_CFG_REG_0_REG			0x2000
+#define DSAF_SBM_BP_CFG_0_XGE_REG_0_REG		0x2004
+#define DSAF_SBM_BP_CFG_0_PPE_REG_0_REG		0x2304
+#define DSAF_SBM_BP_CFG_0_ROCEE_REG_0_REG	0x2604
+#define DSAF_SBM_BP_CFG_1_REG_0_REG		0x2008
+#define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG		0x200C
+#define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG		0x230C
+#define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG	0x260C
+#define DSAF_SBM_FREE_CNT_0_0_REG		0x2010
+#define DSAF_SBM_FREE_CNT_1_0_REG		0x2014
+#define DSAF_SBM_BP_CNT_0_0_REG			0x2018
+#define DSAF_SBM_BP_CNT_1_0_REG			0x201C
+#define DSAF_SBM_BP_CNT_2_0_REG			0x2020
+#define DSAF_SBM_BP_CNT_3_0_REG			0x2024
+#define DSAF_SBM_INER_ST_0_REG			0x2028
+#define DSAF_SBM_MIB_REQ_FAILED_TC_0_REG	0x202C
+#define DSAF_SBM_LNK_INPORT_CNT_0_REG		0x2030
+#define DSAF_SBM_LNK_DROP_CNT_0_REG		0x2034
+#define DSAF_SBM_INF_OUTPORT_CNT_0_REG		0x2038
+#define DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG	0x203C
+#define DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG	0x2040
+#define DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG	0x2044
+#define DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG	0x2048
+#define DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG	0x204C
+#define DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG	0x2050
+#define DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG	0x2054
+#define DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG	0x2058
+#define DSAF_SBM_LNK_REQ_CNT_0_REG		0x205C
+#define DSAF_SBM_LNK_RELS_CNT_0_REG		0x2060
+#define DSAF_SBM_BP_CFG_3_REG_0_REG		0x2068
+#define DSAF_SBM_BP_CFG_4_REG_0_REG		0x206C
+
+#define DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG	0x3000
+#define DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG	0x3004
+#define DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG	0x3008
+#define DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG	0x300C
+#define DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG	0x3010
+#define DSAF_XOD_ETS_TOKEN_CFG_0_REG		0x3014
+#define DSAF_XOD_PFS_CFG_0_0_REG		0x3018
+#define DSAF_XOD_PFS_CFG_1_0_REG		0x301C
+#define DSAF_XOD_PFS_CFG_2_0_REG		0x3020
+#define DSAF_XOD_GNT_L_0_REG			0x3024
+#define DSAF_XOD_GNT_H_0_REG			0x3028
+#define DSAF_XOD_CONNECT_STATE_0_REG		0x302C
+#define DSAF_XOD_RCVPKT_CNT_0_REG		0x3030
+#define DSAF_XOD_RCVTC0_CNT_0_REG		0x3034
+#define DSAF_XOD_RCVTC1_CNT_0_REG		0x3038
+#define DSAF_XOD_RCVTC2_CNT_0_REG		0x303C
+#define DSAF_XOD_RCVTC3_CNT_0_REG		0x3040
+#define DSAF_XOD_RCVVC0_CNT_0_REG		0x3044
+#define DSAF_XOD_RCVVC1_CNT_0_REG		0x3048
+#define DSAF_XOD_XGE_RCVIN0_CNT_0_REG		0x304C
+#define DSAF_XOD_XGE_RCVIN1_CNT_0_REG		0x3050
+#define DSAF_XOD_XGE_RCVIN2_CNT_0_REG		0x3054
+#define DSAF_XOD_XGE_RCVIN3_CNT_0_REG		0x3058
+#define DSAF_XOD_XGE_RCVIN4_CNT_0_REG		0x305C
+#define DSAF_XOD_XGE_RCVIN5_CNT_0_REG		0x3060
+#define DSAF_XOD_XGE_RCVIN6_CNT_0_REG		0x3064
+#define DSAF_XOD_XGE_RCVIN7_CNT_0_REG		0x3068
+#define DSAF_XOD_PPE_RCVIN0_CNT_0_REG		0x306C
+#define DSAF_XOD_PPE_RCVIN1_CNT_0_REG		0x3070
+#define DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG		0x3074
+#define DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG		0x3078
+#define DSAF_XOD_FIFO_STATUS_0_REG		0x307C
+
+#define DSAF_VOQ_ECC_INVERT_EN_0_REG		0x4004
+#define DSAF_VOQ_SRAM_PKT_NUM_0_REG		0x4008
+#define DSAF_VOQ_IN_PKT_NUM_0_REG		0x400C
+#define DSAF_VOQ_OUT_PKT_NUM_0_REG		0x4010
+#define DSAF_VOQ_ECC_ERR_ADDR_0_REG		0x4014
+#define DSAF_VOQ_BP_STATUS_0_REG		0x4018
+#define DSAF_VOQ_SPUP_IDLE_0_REG		0x401C
+#define DSAF_VOQ_XGE_XOD_REQ_0_0_REG		0x4024
+#define DSAF_VOQ_XGE_XOD_REQ_1_0_REG		0x4028
+#define DSAF_VOQ_PPE_XOD_REQ_0_REG		0x402C
+#define DSAF_VOQ_ROCEE_XOD_REQ_0_REG		0x4030
+#define DSAF_VOQ_BP_ALL_THRD_0_REG		0x4034
+
+#define DSAF_TBL_CTRL_0_REG			0x5000
+#define DSAF_TBL_INT_MSK_0_REG			0x5004
+#define DSAF_TBL_INT_SRC_0_REG			0x5008
+#define DSAF_TBL_INT_STS_0_REG			0x5100
+#define DSAF_TBL_TCAM_ADDR_0_REG		0x500C
+#define DSAF_TBL_LINE_ADDR_0_REG		0x5010
+#define DSAF_TBL_TCAM_HIGH_0_REG		0x5014
+#define DSAF_TBL_TCAM_LOW_0_REG			0x5018
+#define DSAF_TBL_TCAM_MCAST_CFG_4_0_REG		0x501C
+#define DSAF_TBL_TCAM_MCAST_CFG_3_0_REG		0x5020
+#define DSAF_TBL_TCAM_MCAST_CFG_2_0_REG		0x5024
+#define DSAF_TBL_TCAM_MCAST_CFG_1_0_REG		0x5028
+#define DSAF_TBL_TCAM_MCAST_CFG_0_0_REG		0x502C
+#define DSAF_TBL_TCAM_UCAST_CFG_0_REG		0x5030
+#define DSAF_TBL_LIN_CFG_0_REG			0x5034
+#define DSAF_TBL_TCAM_RDATA_HIGH_0_REG		0x5038
+#define DSAF_TBL_TCAM_RDATA_LOW_0_REG		0x503C
+#define DSAF_TBL_TCAM_RAM_RDATA4_0_REG		0x5040
+#define DSAF_TBL_TCAM_RAM_RDATA3_0_REG		0x5044
+#define DSAF_TBL_TCAM_RAM_RDATA2_0_REG		0x5048
+#define DSAF_TBL_TCAM_RAM_RDATA1_0_REG		0x504C
+#define DSAF_TBL_TCAM_RAM_RDATA0_0_REG		0x5050
+#define DSAF_TBL_LIN_RDATA_0_REG		0x5054
+#define DSAF_TBL_DA0_MIS_INFO1_0_REG		0x5058
+#define DSAF_TBL_DA0_MIS_INFO0_0_REG		0x505C
+#define DSAF_TBL_SA_MIS_INFO2_0_REG		0x5104
+#define DSAF_TBL_SA_MIS_INFO1_0_REG		0x5098
+#define DSAF_TBL_SA_MIS_INFO0_0_REG		0x509C
+#define DSAF_TBL_PUL_0_REG			0x50A0
+#define DSAF_TBL_OLD_RSLT_0_REG			0x50A4
+#define DSAF_TBL_OLD_SCAN_VAL_0_REG		0x50A8
+#define DSAF_TBL_DFX_CTRL_0_REG			0x50AC
+#define DSAF_TBL_DFX_STAT_0_REG			0x50B0
+#define DSAF_TBL_DFX_STAT_2_0_REG		0x5108
+#define DSAF_TBL_LKUP_NUM_I_0_REG		0x50C0
+#define DSAF_TBL_LKUP_NUM_O_0_REG		0x50E0
+#define DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG	0x510C
+
+#define DSAF_INODE_FIFO_WL_0_REG		0x6000
+#define DSAF_ONODE_FIFO_WL_0_REG		0x6020
+#define DSAF_XGE_GE_WORK_MODE_0_REG		0x6040
+#define DSAF_XGE_APP_RX_LINK_UP_0_REG		0x6080
+#define DSAF_NETPORT_CTRL_SIG_0_REG		0x60A0
+#define DSAF_XGE_CTRL_SIG_CFG_0_REG		0x60C0
+
+#define PPE_COM_CFG_QID_MODE_REG		0x0
+#define PPE_COM_INTEN_REG			0x110
+#define PPE_COM_RINT_REG			0x114
+#define PPE_COM_INTSTS_REG			0x118
+#define PPE_COM_COMMON_CNT_CLR_CE_REG		0x1120
+#define PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG	0x300
+#define PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG	0x600
+#define PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG	0x900
+#define PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG	0xC00
+
+#define PPE_CFG_TX_FIFO_THRSLD_REG		0x0
+#define PPE_CFG_RX_FIFO_THRSLD_REG		0x4
+#define PPE_CFG_RX_FIFO_PAUSE_THRSLD_REG	0x8
+#define PPE_CFG_RX_FIFO_SW_BP_THRSLD_REG	0xC
+#define PPE_CFG_PAUSE_IDLE_CNT_REG		0x10
+#define PPE_CFG_BUS_CTRL_REG			0x40
+#define PPE_CFG_TNL_TO_BE_RST_REG		0x48
+#define PPE_CURR_TNL_CAN_RST_REG		0x4C
+#define PPE_CFG_XGE_MODE_REG			0x80
+#define PPE_CFG_MAX_FRAME_LEN_REG		0x84
+#define PPE_CFG_RX_PKT_MODE_REG			0x88
+#define PPE_CFG_RX_VLAN_TAG_REG			0x8C
+#define PPE_CFG_TAG_GEN_REG			0x90
+#define PPE_CFG_PARSE_TAG_REG			0x94
+#define PPE_CFG_PRO_CHECK_EN_REG		0x98
+#define PPE_INTEN_REG				0x100
+#define PPE_RINT_REG				0x104
+#define PPE_INTSTS_REG				0x108
+#define PPE_CFG_RX_PKT_INT_REG			0x140
+#define PPE_CFG_HEAT_DECT_TIME0_REG		0x144
+#define PPE_CFG_HEAT_DECT_TIME1_REG		0x148
+#define PPE_HIS_RX_SW_PKT_CNT_REG		0x200
+#define PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG		0x204
+#define PPE_HIS_RX_PKT_NO_BUF_CNT_REG		0x208
+#define PPE_HIS_TX_BD_CNT_REG			0x20C
+#define PPE_HIS_TX_PKT_CNT_REG			0x210
+#define PPE_HIS_TX_PKT_OK_CNT_REG		0x214
+#define PPE_HIS_TX_PKT_EPT_CNT_REG		0x218
+#define PPE_HIS_TX_PKT_CS_FAIL_CNT_REG		0x21C
+#define PPE_HIS_RX_APP_BUF_FAIL_CNT_REG		0x220
+#define PPE_HIS_RX_APP_BUF_WAIT_CNT_REG		0x224
+#define PPE_HIS_RX_PKT_DROP_FUL_CNT_REG		0x228
+#define PPE_HIS_RX_PKT_DROP_PRT_CNT_REG		0x22C
+#define PPE_TNL_0_5_CNT_CLR_CE_REG		0x300
+#define PPE_CFG_AXI_DBG_REG			0x304
+#define PPE_HIS_PRO_ERR_REG			0x308
+#define PPE_HIS_TNL_FIFO_ERR_REG		0x30C
+#define PPE_CURR_CFF_DATA_NUM_REG		0x310
+#define PPE_CURR_RX_ST_REG			0x314
+#define PPE_CURR_TX_ST_REG			0x318
+#define PPE_CURR_RX_FIFO0_REG			0x31C
+#define PPE_CURR_RX_FIFO1_REG			0x320
+#define PPE_CURR_TX_FIFO0_REG			0x324
+#define PPE_CURR_TX_FIFO1_REG			0x328
+#define PPE_ECO0_REG				0x32C
+#define PPE_ECO1_REG				0x330
+#define PPE_ECO2_REG				0x334
+
+#define RCB_COM_CFG_ENDIAN_REG			0x0
+#define RCB_COM_CFG_SYS_FSH_REG			0xC
+#define RCB_COM_CFG_INIT_FLAG_REG		0x10
+#define RCB_COM_CFG_PKT_REG			0x30
+#define RCB_COM_CFG_RINVLD_REG			0x34
+#define RCB_COM_CFG_FNA_REG			0x38
+#define RCB_COM_CFG_FA_REG			0x3C
+#define RCB_COM_CFG_PKT_TC_BP_REG		0x40
+#define RCB_COM_CFG_PPE_TNL_CLKEN_REG		0x44
+
+#define RCB_COM_INTMSK_TX_PKT_REG		0x3A0
+#define RCB_COM_RINT_TX_PKT_REG			0x3A8
+#define RCB_COM_INTMASK_ECC_ERR_REG		0x400
+#define RCB_COM_INTSTS_ECC_ERR_REG		0x408
+#define RCB_COM_EBD_SRAM_ERR_REG		0x410
+#define RCB_COM_RXRING_ERR_REG			0x41C
+#define RCB_COM_TXRING_ERR_REG			0x420
+#define RCB_COM_TX_FBD_ERR_REG			0x424
+#define RCB_SRAM_ECC_CHK_EN_REG			0x428
+#define RCB_SRAM_ECC_CHK0_REG			0x42C
+#define RCB_SRAM_ECC_CHK1_REG			0x430
+#define RCB_SRAM_ECC_CHK2_REG			0x434
+#define RCB_SRAM_ECC_CHK3_REG			0x438
+#define RCB_SRAM_ECC_CHK4_REG			0x43c
+#define RCB_SRAM_ECC_CHK5_REG			0x440
+#define RCB_ECC_ERR_ADDR0_REG			0x450
+#define RCB_ECC_ERR_ADDR3_REG			0x45C
+#define RCB_ECC_ERR_ADDR4_REG			0x460
+#define RCB_ECC_ERR_ADDR5_REG			0x464
+
+#define RCB_COM_SF_CFG_INTMASK_RING		0x480
+#define RCB_COM_SF_CFG_RING_STS			0x484
+#define RCB_COM_SF_CFG_RING			0x488
+#define RCB_COM_SF_CFG_INTMASK_BD		0x48C
+#define RCB_COM_SF_CFG_BD_RINT_STS		0x470
+#define RCB_COM_RCB_RD_BD_BUSY			0x490
+#define RCB_COM_RCB_FBD_CRT_EN			0x494
+#define RCB_COM_AXI_WR_ERR_INTMASK		0x498
+#define RCB_COM_AXI_ERR_STS			0x49C
+#define RCB_COM_CHK_TX_FBD_NUM_REG		0x4a0
+
+#define RCB_CFG_BD_NUM_REG			0x9000
+#define RCB_CFG_PKTLINE_REG			0x9050
+
+#define RCB_CFG_OVERTIME_REG			0x9300
+#define RCB_CFG_PKTLINE_INT_NUM_REG		0x9304
+#define RCB_CFG_OVERTIME_INT_NUM_REG		0x9308
+
+#define RCB_RING_RX_RING_BASEADDR_L_REG		0x00000
+#define RCB_RING_RX_RING_BASEADDR_H_REG		0x00004
+#define RCB_RING_RX_RING_BD_NUM_REG		0x00008
+#define RCB_RING_RX_RING_BD_LEN_REG		0x0000C
+#define RCB_RING_RX_RING_PKTLINE_REG		0x00010
+#define RCB_RING_RX_RING_TAIL_REG		0x00018
+#define RCB_RING_RX_RING_HEAD_REG		0x0001C
+#define RCB_RING_RX_RING_FBDNUM_REG		0x00020
+#define RCB_RING_RX_RING_PKTNUM_RECORD_REG	0x0002C
+
+#define RCB_RING_TX_RING_BASEADDR_L_REG		0x00040
+#define RCB_RING_TX_RING_BASEADDR_H_REG		0x00044
+#define RCB_RING_TX_RING_BD_NUM_REG		0x00048
+#define RCB_RING_TX_RING_BD_LEN_REG		0x0004C
+#define RCB_RING_TX_RING_PKTLINE_REG		0x00050
+#define RCB_RING_TX_RING_TAIL_REG		0x00058
+#define RCB_RING_TX_RING_HEAD_REG		0x0005C
+#define RCB_RING_TX_RING_FBDNUM_REG		0x00060
+#define RCB_RING_TX_RING_OFFSET_REG		0x00064
+#define RCB_RING_TX_RING_PKTNUM_RECORD_REG	0x0006C
+
+#define RCB_RING_PREFETCH_EN_REG		0x0007C
+#define RCB_RING_CFG_VF_NUM_REG			0x00080
+#define RCB_RING_ASID_REG			0x0008C
+#define RCB_RING_RX_VM_REG			0x00090
+#define RCB_RING_T0_BE_RST			0x00094
+#define RCB_RING_COULD_BE_RST			0x00098
+#define RCB_RING_WRR_WEIGHT_REG			0x0009c
+
+#define RCB_RING_INTMSK_RXWL_REG		0x000A0
+#define RCB_RING_INTSTS_RX_RING_REG		0x000A4
+#define RCB_RING_INTMSK_TXWL_REG		0x000AC
+#define RCB_RING_INTSTS_TX_RING_REG		0x000B0
+#define RCB_RING_INTMSK_RX_OVERTIME_REG		0x000B8
+#define RCB_RING_INTSTS_RX_OVERTIME_REG		0x000BC
+#define RCB_RING_INTMSK_TX_OVERTIME_REG		0x000C4
+#define RCB_RING_INTSTS_TX_OVERTIME_REG		0x000C8
+
+#define GMAC_DUPLEX_TYPE_REG			0x0008UL
+#define GMAC_FD_FC_TYPE_REG			0x000CUL
+#define GMAC_FC_TX_TIMER_REG			0x001CUL
+#define GMAC_FD_FC_ADDR_LOW_REG			0x0020UL
+#define GMAC_FD_FC_ADDR_HIGH_REG		0x0024UL
+#define GMAC_IPG_TX_TIMER_REG			0x0030UL
+#define GMAC_PAUSE_THR_REG			0x0038UL
+#define GMAC_MAX_FRM_SIZE_REG			0x003CUL
+#define GMAC_PORT_MODE_REG			0x0040UL
+#define GMAC_PORT_EN_REG			0x0044UL
+#define GMAC_PAUSE_EN_REG			0x0048UL
+#define GMAC_SHORT_RUNTS_THR_REG		0x0050UL
+#define GMAC_AN_NEG_STATE_REG			0x0058UL
+#define GMAC_TX_LOCAL_PAGE_REG			0x005CUL
+#define GMAC_TRANSMIT_CONTROL_REG		0x0060UL
+#define GMAC_REC_FILT_CONTROL_REG		0x0064UL
+#define GMAC_PTP_CONFIG_REG			0x0074UL
+
+#define GMAC_RX_OCTETS_TOTAL_OK_REG		0x0080UL
+#define GMAC_RX_OCTETS_BAD_REG			0x0084UL
+#define GMAC_RX_UC_PKTS_REG			0x0088UL
+#define GMAC_RX_MC_PKTS_REG			0x008CUL
+#define GMAC_RX_BC_PKTS_REG			0x0090UL
+#define GMAC_RX_PKTS_64OCTETS_REG		0x0094UL
+#define GMAC_RX_PKTS_65TO127OCTETS_REG		0x0098UL
+#define GMAC_RX_PKTS_128TO255OCTETS_REG		0x009CUL
+#define GMAC_RX_PKTS_255TO511OCTETS_REG		0x00A0UL
+#define GMAC_RX_PKTS_512TO1023OCTETS_REG	0x00A4UL
+#define GMAC_RX_PKTS_1024TO1518OCTETS_REG	0x00A8UL
+#define GMAC_RX_PKTS_1519TOMAXOCTETS_REG	0x00ACUL
+#define GMAC_RX_FCS_ERRORS_REG			0x00B0UL
+#define GMAC_RX_TAGGED_REG			0x00B4UL
+#define GMAC_RX_DATA_ERR_REG			0x00B8UL
+#define GMAC_RX_ALIGN_ERRORS_REG		0x00BCUL
+#define GMAC_RX_LONG_ERRORS_REG			0x00C0UL
+#define GMAC_RX_JABBER_ERRORS_REG		0x00C4UL
+#define GMAC_RX_PAUSE_MACCTRL_FRAM_REG		0x00C8UL
+#define GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG	0x00CCUL
+#define GMAC_RX_VERY_LONG_ERR_CNT_REG		0x00D0UL
+#define GMAC_RX_RUNT_ERR_CNT_REG		0x00D4UL
+#define GMAC_RX_SHORT_ERR_CNT_REG		0x00D8UL
+#define GMAC_RX_FILT_PKT_CNT_REG		0x00E8UL
+#define GMAC_RX_OCTETS_TOTAL_FILT_REG		0x00ECUL
+#define GMAC_OCTETS_TRANSMITTED_OK_REG		0x0100UL
+#define GMAC_OCTETS_TRANSMITTED_BAD_REG		0x0104UL
+#define GMAC_TX_UC_PKTS_REG			0x0108UL
+#define GMAC_TX_MC_PKTS_REG			0x010CUL
+#define GMAC_TX_BC_PKTS_REG			0x0110UL
+#define GMAC_TX_PKTS_64OCTETS_REG		0x0114UL
+#define GMAC_TX_PKTS_65TO127OCTETS_REG		0x0118UL
+#define GMAC_TX_PKTS_128TO255OCTETS_REG		0x011CUL
+#define GMAC_TX_PKTS_255TO511OCTETS_REG		0x0120UL
+#define GMAC_TX_PKTS_512TO1023OCTETS_REG	0x0124UL
+#define GMAC_TX_PKTS_1024TO1518OCTETS_REG	0x0128UL
+#define GMAC_TX_PKTS_1519TOMAXOCTETS_REG	0x012CUL
+#define GMAC_TX_EXCESSIVE_LENGTH_DROP_REG	0x014CUL
+#define GMAC_TX_UNDERRUN_REG			0x0150UL
+#define GMAC_TX_TAGGED_REG			0x0154UL
+#define GMAC_TX_CRC_ERROR_REG			0x0158UL
+#define GMAC_TX_PAUSE_FRAMES_REG		0x015CUL
+#define GAMC_RX_MAX_FRAME			0x0170UL
+#define GMAC_LINE_LOOP_BACK_REG			0x01A8UL
+#define GMAC_CF_CRC_STRIP_REG			0x01B0UL
+#define GMAC_MODE_CHANGE_EN_REG			0x01B4UL
+#define GMAC_SIXTEEN_BIT_CNTR_REG		0x01CCUL
+#define GMAC_LD_LINK_COUNTER_REG		0x01D0UL
+#define GMAC_LOOP_REG				0x01DCUL
+#define GMAC_RECV_CONTROL_REG			0x01E0UL
+#define GMAC_VLAN_CODE_REG			0x01E8UL
+#define GMAC_RX_OVERRUN_CNT_REG			0x01ECUL
+#define GMAC_RX_LENGTHFIELD_ERR_CNT_REG		0x01F4UL
+#define GMAC_RX_FAIL_COMMA_CNT_REG		0x01F8UL
+#define GMAC_STATION_ADDR_LOW_0_REG		0x0200UL
+#define GMAC_STATION_ADDR_HIGH_0_REG		0x0204UL
+#define GMAC_STATION_ADDR_LOW_1_REG		0x0208UL
+#define GMAC_STATION_ADDR_HIGH_1_REG		0x020CUL
+#define GMAC_STATION_ADDR_LOW_2_REG		0x0210UL
+#define GMAC_STATION_ADDR_HIGH_2_REG		0x0214UL
+#define GMAC_STATION_ADDR_LOW_3_REG		0x0218UL
+#define GMAC_STATION_ADDR_HIGH_3_REG		0x021CUL
+#define GMAC_STATION_ADDR_LOW_4_REG		0x0220UL
+#define GMAC_STATION_ADDR_HIGH_4_REG		0x0224UL
+#define GMAC_STATION_ADDR_LOW_5_REG		0x0228UL
+#define GMAC_STATION_ADDR_HIGH_5_REG		0x022CUL
+#define GMAC_STATION_ADDR_LOW_MSK_0_REG		0x0230UL
+#define GMAC_STATION_ADDR_HIGH_MSK_0_REG	0x0234UL
+#define GMAC_STATION_ADDR_LOW_MSK_1_REG		0x0238UL
+#define GMAC_STATION_ADDR_HIGH_MSK_1_REG	0x023CUL
+#define GMAC_MAC_SKIP_LEN_REG			0x0240UL
+#define GMAC_TX_LOOP_PKT_PRI_REG		0x0378UL
+
+#define XGMAC_INT_STATUS_REG			0x0
+#define XGMAC_INT_ENABLE_REG			0x4
+#define XGMAC_INT_SET_REG			0x8
+#define XGMAC_IERR_U_INFO_REG			0xC
+#define XGMAC_OVF_INFO_REG			0x10
+#define XGMAC_OVF_CNT_REG			0x14
+#define XGMAC_PORT_MODE_REG			0x40
+#define XGMAC_CLK_ENABLE_REG			0x44
+#define XGMAC_RESET_REG				0x48
+#define XGMAC_LINK_CONTROL_REG			0x50
+#define XGMAC_LINK_STATUS_REG			0x54
+#define XGMAC_SPARE_REG				0xC0
+#define XGMAC_SPARE_CNT_REG			0xC4
+
+#define XGMAC_MAC_ENABLE_REG			0x100
+#define XGMAC_MAC_CONTROL_REG			0x104
+#define XGMAC_MAC_IPG_REG			0x120
+#define XGMAC_MAC_MSG_CRC_EN_REG		0x124
+#define XGMAC_MAC_MSG_IMG_REG			0x128
+#define XGMAC_MAC_MSG_FC_CFG_REG		0x12C
+#define XGMAC_MAC_MSG_TC_CFG_REG		0x130
+#define XGMAC_MAC_PAD_SIZE_REG			0x134
+#define XGMAC_MAC_MIN_PKT_SIZE_REG		0x138
+#define XGMAC_MAC_MAX_PKT_SIZE_REG		0x13C
+#define XGMAC_MAC_PAUSE_CTRL_REG		0x160
+#define XGMAC_MAC_PAUSE_TIME_REG		0x164
+#define XGMAC_MAC_PAUSE_GAP_REG			0x168
+#define XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG		0x16C
+#define XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG		0x170
+#define XGMAC_MAC_PAUSE_PEER_MAC_H_REG		0x174
+#define XGMAC_MAC_PAUSE_PEER_MAC_L_REG		0x178
+#define XGMAC_MAC_PFC_PRI_EN_REG		0x17C
+#define XGMAC_MAC_1588_CTRL_REG			0x180
+#define XGMAC_MAC_1588_TX_PORT_DLY_REG		0x184
+#define XGMAC_MAC_1588_RX_PORT_DLY_REG		0x188
+#define XGMAC_MAC_1588_ASYM_DLY_REG		0x18C
+#define XGMAC_MAC_1588_ADJUST_CFG_REG		0x190
+#define XGMAC_MAC_Y1731_ETH_TYPE_REG		0x194
+#define XGMAC_MAC_MIB_CONTROL_REG		0x198
+#define XGMAC_MAC_WAN_RATE_ADJUST_REG		0x19C
+#define XGMAC_MAC_TX_ERR_MARK_REG		0x1A0
+#define XGMAC_MAC_TX_LF_RF_CONTROL_REG		0x1A4
+#define XGMAC_MAC_RX_LF_RF_STATUS_REG		0x1A8
+#define XGMAC_MAC_TX_RUNT_PKT_CNT_REG		0x1C0
+#define XGMAC_MAC_RX_RUNT_PKT_CNT_REG		0x1C4
+#define XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG	0x1C8
+#define XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG	0x1CC
+#define XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG	0x1D0
+#define XGMAC_MAC_RX_ERR_MSG_CNT_REG		0x1D4
+#define XGMAC_MAC_RX_ERR_EFD_CNT_REG		0x1D8
+#define XGMAC_MAC_ERR_INFO_REG			0x1DC
+#define XGMAC_MAC_DBG_INFO_REG			0x1E0
+
+#define XGMAC_PCS_BASER_SYNC_THD_REG		0x330
+#define XGMAC_PCS_STATUS1_REG			0x404
+#define XGMAC_PCS_BASER_STATUS1_REG		0x410
+#define XGMAC_PCS_BASER_STATUS2_REG		0x414
+#define XGMAC_PCS_BASER_SEEDA_0_REG		0x420
+#define XGMAC_PCS_BASER_SEEDA_1_REG		0x424
+#define XGMAC_PCS_BASER_SEEDB_0_REG		0x428
+#define XGMAC_PCS_BASER_SEEDB_1_REG		0x42C
+#define XGMAC_PCS_BASER_TEST_CONTROL_REG	0x430
+#define XGMAC_PCS_BASER_TEST_ERR_CNT_REG	0x434
+#define XGMAC_PCS_DBG_INFO_REG			0x4C0
+#define XGMAC_PCS_DBG_INFO1_REG			0x4C4
+#define XGMAC_PCS_DBG_INFO2_REG			0x4C8
+#define XGMAC_PCS_DBG_INFO3_REG			0x4CC
+
+#define XGMAC_PMA_ENABLE_REG			0x700
+#define XGMAC_PMA_CONTROL_REG			0x704
+#define XGMAC_PMA_SIGNAL_STATUS_REG		0x708
+#define XGMAC_PMA_DBG_INFO_REG			0x70C
+#define XGMAC_PMA_FEC_ABILITY_REG		0x740
+#define XGMAC_PMA_FEC_CONTROL_REG		0x744
+#define XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG	0x750
+#define XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG	0x760
+
+#define XGMAC_TX_PKTS_FRAGMENT			0x0000
+#define XGMAC_TX_PKTS_UNDERSIZE			0x0008
+#define XGMAC_TX_PKTS_UNDERMIN			0x0010
+#define XGMAC_TX_PKTS_64OCTETS			0x0018
+#define XGMAC_TX_PKTS_65TO127OCTETS		0x0020
+#define XGMAC_TX_PKTS_128TO255OCTETS		0x0028
+#define XGMAC_TX_PKTS_256TO511OCTETS		0x0030
+#define XGMAC_TX_PKTS_512TO1023OCTETS		0x0038
+#define XGMAC_TX_PKTS_1024TO1518OCTETS		0x0040
+#define XGMAC_TX_PKTS_1519TOMAXOCTETS		0x0048
+#define XGMAC_TX_PKTS_1519TOMAXOCTETSOK		0x0050
+#define XGMAC_TX_PKTS_OVERSIZE			0x0058
+#define XGMAC_TX_PKTS_JABBER			0x0060
+#define XGMAC_TX_GOODPKTS			0x0068
+#define XGMAC_TX_GOODOCTETS			0x0070
+#define XGMAC_TX_TOTAL_PKTS			0x0078
+#define XGMAC_TX_TOTALOCTETS			0x0080
+#define XGMAC_TX_UNICASTPKTS			0x0088
+#define XGMAC_TX_MULTICASTPKTS			0x0090
+#define XGMAC_TX_BROADCASTPKTS			0x0098
+#define XGMAC_TX_PRI0PAUSEPKTS			0x00a0
+#define XGMAC_TX_PRI1PAUSEPKTS			0x00a8
+#define XGMAC_TX_PRI2PAUSEPKTS			0x00b0
+#define XGMAC_TX_PRI3PAUSEPKTS			0x00b8
+#define XGMAC_TX_PRI4PAUSEPKTS			0x00c0
+#define XGMAC_TX_PRI5PAUSEPKTS			0x00c8
+#define XGMAC_TX_PRI6PAUSEPKTS			0x00d0
+#define XGMAC_TX_PRI7PAUSEPKTS			0x00d8
+#define XGMAC_TX_MACCTRLPKTS			0x00e0
+#define XGMAC_TX_1731PKTS			0x00e8
+#define XGMAC_TX_1588PKTS			0x00f0
+#define XGMAC_RX_FROMAPPGOODPKTS		0x00f8
+#define XGMAC_RX_FROMAPPBADPKTS			0x0100
+#define XGMAC_TX_ERRALLPKTS			0x0108
+
+#define XGMAC_RX_PKTS_FRAGMENT			0x0110
+#define XGMAC_RX_PKTSUNDERSIZE			0x0118
+#define XGMAC_RX_PKTS_UNDERMIN			0x0120
+#define XGMAC_RX_PKTS_64OCTETS			0x0128
+#define XGMAC_RX_PKTS_65TO127OCTETS		0x0130
+#define XGMAC_RX_PKTS_128TO255OCTETS		0x0138
+#define XGMAC_RX_PKTS_256TO511OCTETS		0x0140
+#define XGMAC_RX_PKTS_512TO1023OCTETS		0x0148
+#define XGMAC_RX_PKTS_1024TO1518OCTETS		0x0150
+#define XGMAC_RX_PKTS_1519TOMAXOCTETS		0x0158
+#define XGMAC_RX_PKTS_1519TOMAXOCTETSOK		0x0160
+#define XGMAC_RX_PKTS_OVERSIZE			0x0168
+#define XGMAC_RX_PKTS_JABBER			0x0170
+#define XGMAC_RX_GOODPKTS			0x0178
+#define XGMAC_RX_GOODOCTETS			0x0180
+#define XGMAC_RX_TOTAL_PKTS			0x0188
+#define XGMAC_RX_TOTALOCTETS			0x0190
+#define XGMAC_RX_UNICASTPKTS			0x0198
+#define XGMAC_RX_MULTICASTPKTS			0x01a0
+#define XGMAC_RX_BROADCASTPKTS			0x01a8
+#define XGMAC_RX_PRI0PAUSEPKTS			0x01b0
+#define XGMAC_RX_PRI1PAUSEPKTS			0x01b8
+#define XGMAC_RX_PRI2PAUSEPKTS			0x01c0
+#define XGMAC_RX_PRI3PAUSEPKTS			0x01c8
+#define XGMAC_RX_PRI4PAUSEPKTS			0x01d0
+#define XGMAC_RX_PRI5PAUSEPKTS			0x01d8
+#define XGMAC_RX_PRI6PAUSEPKTS			0x01e0
+#define XGMAC_RX_PRI7PAUSEPKTS			0x01e8
+#define XGMAC_RX_MACCTRLPKTS			0x01f0
+#define XGMAC_TX_SENDAPPGOODPKTS		0x01f8
+#define XGMAC_TX_SENDAPPBADPKTS			0x0200
+#define XGMAC_RX_1731PKTS			0x0208
+#define XGMAC_RX_SYMBOLERRPKTS			0x0210
+#define XGMAC_RX_FCSERRPKTS			0x0218
+
+#define XGMAC_TRX_CORE_SRST_M			0x2080
+
+#define DSAF_CFG_EN_S 0
+#define DSAF_CFG_TC_MODE_S 1
+#define DSAF_CFG_CRC_EN_S 2
+#define DSAF_CFG_SBM_INIT_S 3
+#define DSAF_CFG_MIX_MODE_S 4
+#define DSAF_CFG_STP_MODE_S 5
+#define DSAF_CFG_LOCA_ADDR_EN_S 6
+
+#define DSAF_CNT_CLR_CE_S 0
+#define DSAF_SNAP_EN_S 1
+
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_XGE 41
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000 410
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_GE_2500 103
+
+#define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1)
+#define DSAF_PFC_UNINT_CNT_S 0
+
+#define DSAF_PPE_QID_CFG_M 0xFF
+#define DSAF_PPE_QID_CFG_S 0
+
+#define DSAF_SW_PORT_TYPE_M 3
+#define DSAF_SW_PORT_TYPE_S 0
+
+#define DSAF_STP_PORT_TYPE_M 7
+#define DSAF_STP_PORT_TYPE_S 0
+
+#define DSAF_INODE_IN_PORT_NUM_M 7
+#define DSAF_INODE_IN_PORT_NUM_S 0
+
+#define HNS_DSAF_I4TC_CFG 0x18688688
+#define HNS_DSAF_I8TC_CFG 0x18FAC688
+
+#define DSAF_SBM_CFG_SHCUT_EN_S 0
+#define DSAF_SBM_CFG_EN_S 1
+#define DSAF_SBM_CFG_MIB_EN_S 2
+#define DSAF_SBM_CFG_ECC_INVERT_EN_S 3
+
+#define DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S 0
+#define DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S 10
+#define DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+#define DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S 20
+#define DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M (((1ULL << 11) - 1) << 20)
+
+#define DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S 0
+#define DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S 10
+#define DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_SBM_CFG2_SET_BUF_NUM_S 0
+#define DSAF_SBM_CFG2_SET_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG2_RESET_BUF_NUM_S 10
+#define DSAF_SBM_CFG2_RESET_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S 0
+#define DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 10
+#define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_TBL_TCAM_ADDR_S 0
+#define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1)
+
+#define DSAF_TBL_LINE_ADDR_S 0
+#define DSAF_TBL_LINE_ADDR_M ((1ULL << 15) - 1)
+
+#define DSAF_TBL_MCAST_CFG4_VM128_112_S 0
+#define DSAF_TBL_MCAST_CFG4_VM128_112_M (((1ULL << 7) - 1) << 0)
+#define DSAF_TBL_MCAST_CFG4_ITEM_VLD_S 7
+#define DSAF_TBL_MCAST_CFG4_OLD_EN_S 8
+
+#define DSAF_TBL_MCAST_CFG0_XGE5_0_S 0
+#define DSAF_TBL_MCAST_CFG0_XGE5_0_M (((1ULL << 6) - 1) << 0)
+#define DSAF_TBL_MCAST_CFG0_VM25_0_S 6
+#define DSAF_TBL_MCAST_CFG0_VM25_0_M (((1ULL << 26) - 1) << 6)
+
+#define DSAF_TBL_UCAST_CFG1_OUT_PORT_S 0
+#define DSAF_TBL_UCAST_CFG1_OUT_PORT_M (((1ULL << 8) - 1) << 0)
+#define DSAF_TBL_UCAST_CFG1_DVC_S 8
+#define DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S 9
+#define DSAF_TBL_UCAST_CFG1_ITEM_VLD_S 10
+#define DSAF_TBL_UCAST_CFG1_OLD_EN_S 11
+
+#define DSAF_TBL_LINE_CFG_OUT_PORT_S 0
+#define DSAF_TBL_LINE_CFG_OUT_PORT_M (((1ULL << 8) - 1) << 0)
+#define DSAF_TBL_LINE_CFG_DVC_S 8
+#define DSAF_TBL_LINE_CFG_MAC_DISCARD_S 9
+
+#define DSAF_TBL_PUL_OLD_RSLT_RE_S 0
+#define DSAF_TBL_PUL_MCAST_VLD_S 1
+#define DSAF_TBL_PUL_TCAM_DATA_VLD_S 2
+#define DSAF_TBL_PUL_UCAST_VLD_S 3
+#define DSAF_TBL_PUL_LINE_VLD_S 4
+#define DSAF_TBL_PUL_TCAM_LOAD_S 5
+#define DSAF_TBL_PUL_LINE_LOAD_S 6
+
+#define DSAF_TBL_DFX_LINE_LKUP_NUM_EN_S 0
+#define DSAF_TBL_DFX_UC_LKUP_NUM_EN_S 1
+#define DSAF_TBL_DFX_MC_LKUP_NUM_EN_S 2
+#define DSAF_TBL_DFX_BC_LKUP_NUM_EN_S 3
+#define DSAF_TBL_DFX_RAM_ERR_INJECT_EN_S 4
+
+#define DSAF_VOQ_BP_ALL_DOWNTHRD_S 0
+#define DSAF_VOQ_BP_ALL_DOWNTHRD_M (((1ULL << 10) - 1) << 0)
+#define DSAF_VOQ_BP_ALL_UPTHRD_S 10
+#define DSAF_VOQ_BP_ALL_UPTHRD_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_XGE_GE_WORK_MODE_S 0
+#define DSAF_XGE_GE_LOOPBACK_S 1
+
+#define DSAF_FC_XGE_TX_PAUSE_S 0
+#define DSAF_REGS_XGE_CNT_CAR_S 1
+
+#define PPE_CFG_QID_MODE_DEF_QID_S	0
+#define PPE_CFG_QID_MODE_DEF_QID_M	(0xff << PPE_CFG_QID_MODE_DEF_QID_S)
+
+#define PPE_CFG_QID_MODE_CF_QID_MODE_S	8
+#define PPE_CFG_QID_MODE_CF_QID_MODE_M	(0x7 << PPE_CFG_QID_MODE_CF_QID_MODE_S)
+
+#define PPE_CNT_CLR_CE_B	0
+#define PPE_CNT_CLR_SNAP_EN_B	1
+
+#define PPE_COMMON_CNT_CLR_CE_B	0
+#define PPE_COMMON_CNT_CLR_SNAP_EN_B	1
+
+#define GMAC_DUPLEX_TYPE_B 0
+
+#define GMAC_FC_TX_TIMER_S 0
+#define GMAC_FC_TX_TIMER_M 0xffff
+
+#define GMAC_MAX_FRM_SIZE_S 0
+#define GMAC_MAX_FRM_SIZE_M 0xffff
+
+#define GMAC_PORT_MODE_S	0
+#define GMAC_PORT_MODE_M	0xf
+
+#define GMAC_RGMII_1000M_DELAY_B	4
+#define GMAC_MII_TX_EDGE_SEL_B		5
+#define GMAC_FIFO_ERR_AUTO_RST_B	6
+#define GMAC_DBG_CLK_LOS_MSK_B		7
+
+#define GMAC_PORT_RX_EN_B	1
+#define GMAC_PORT_TX_EN_B	2
+
+#define GMAC_PAUSE_EN_RX_FDFC_B 0
+#define GMAC_PAUSE_EN_TX_FDFC_B 1
+#define GMAC_PAUSE_EN_TX_HDFC_B 2
+
+#define GMAC_SHORT_RUNTS_THR_S 0
+#define GMAC_SHORT_RUNTS_THR_M 0x1f
+
+#define GMAC_AN_NEG_STAT_FD_B		5
+#define GMAC_AN_NEG_STAT_HD_B		6
+#define GMAC_AN_NEG_STAT_RF1_DUPLIEX_B	12
+#define GMAC_AN_NEG_STAT_RF2_B		13
+
+#define GMAC_AN_NEG_STAT_NP_LNK_OK_B	15
+#define GMAC_AN_NEG_STAT_RX_SYNC_OK_B	20
+#define GMAC_AN_NEG_STAT_AN_DONE_B	21
+
+#define GMAC_AN_NEG_STAT_PS_S		7
+#define GMAC_AN_NEG_STAT_PS_M		(0x3 << GMAC_AN_NEG_STAT_PS_S)
+
+#define GMAC_AN_NEG_STAT_SPEED_S	10
+#define GMAC_AN_NEG_STAT_SPEED_M	(0x3 << GMAC_AN_NEG_STAT_SPEED_S)
+
+#define GMAC_TX_AN_EN_B		5
+#define GMAC_TX_CRC_ADD_B	6
+#define GMAC_TX_PAD_EN_B	7
+
+#define GMAC_LINE_LOOPBACK_B	0
+
+#define GMAC_LP_REG_CF_EXT_DRV_LP_B	1
+#define GMAC_LP_REG_CF2MI_LP_EN_B	2
+
+#define GMAC_MODE_CHANGE_EB_B	0
+
+#define GMAC_RECV_CTRL_STRIP_PAD_EN_B	3
+#define GMAC_RECV_CTRL_RUNT_PKT_EN_B	4
+
+#define GMAC_TX_LOOP_PKT_HIG_PRI_B	0
+#define GMAC_TX_LOOP_PKT_EN_B		1
+
+#define XGMAC_PORT_MODE_TX_S		0x0
+#define XGMAC_PORT_MODE_TX_M		(0x3 << XGMAC_PORT_MODE_TX_S)
+#define XGMAC_PORT_MODE_TX_40G_B	0x3
+#define XGMAC_PORT_MODE_RX_S		0x4
+#define XGMAC_PORT_MODE_RX_M		(0x3 << XGMAC_PORT_MODE_RX_S)
+#define XGMAC_PORT_MODE_RX_40G_B	0x7
+
+#define XGMAC_ENABLE_TX_B		0
+#define XGMAC_ENABLE_RX_B		1
+
+#define XGMAC_CTL_TX_FCS_B		0
+#define XGMAC_CTL_TX_PAD_B		1
+#define XGMAC_CTL_TX_PREAMBLE_TRANS_B	3
+#define XGMAC_CTL_TX_UNDER_MIN_ERR_B	4
+#define XGMAC_CTL_TX_TRUNCATE_B		5
+#define XGMAC_CTL_TX_1588_B		8
+#define XGMAC_CTL_TX_1731_B		9
+#define XGMAC_CTL_TX_PFC_B		10
+#define XGMAC_CTL_RX_FCS_B		16
+#define XGMAC_CTL_RX_FCS_STRIP_B	17
+#define XGMAC_CTL_RX_PREAMBLE_TRANS_B	19
+#define XGMAC_CTL_RX_UNDER_MIN_ERR_B	20
+#define XGMAC_CTL_RX_TRUNCATE_B		21
+#define XGMAC_CTL_RX_1588_B		24
+#define XGMAC_CTL_RX_1731_B		25
+#define XGMAC_CTL_RX_PFC_B		26
+
+#define XGMAC_PMA_FEC_CTL_TX_B		0
+#define XGMAC_PMA_FEC_CTL_RX_B		1
+#define XGMAC_PMA_FEC_CTL_ERR_EN	2
+#define XGMAC_PMA_FEC_CTL_ERR_SH	3
+
+#define XGMAC_PAUSE_CTL_TX_B		0
+#define XGMAC_PAUSE_CTL_RX_B		1
+#define XGMAC_PAUSE_CTL_RSP_MODE_B	2
+#define XGMAC_PAUSE_CTL_TX_XOFF_B	3
+
+static inline void dsaf_write_reg(void *base, u32 reg, u32 value)
+{
+	u8 __iomem *reg_addr = ACCESS_ONCE(base);
+
+	writel(value, reg_addr + reg);
+}
+
+#define dsaf_write_dev(a, reg, value) \
+	dsaf_write_reg((a)->io_base, (reg), (value))
+
+static inline u32 dsaf_read_reg(u8 *base, u32 reg)
+{
+	u8 __iomem *reg_addr = ACCESS_ONCE(base);
+
+	return readl(reg_addr + reg);
+}
+
+#define dsaf_read_dev(a, reg) \
+	dsaf_read_reg((a)->io_base, (reg))
+
+#define dsaf_set_field(origin, mask, shift, val) \
+	do { \
+		(origin) &= (~(mask)); \
+		(origin) |= (((val) << (shift)) & (mask)); \
+	} while (0)
+
+#define dsaf_set_bit(origin, shift, val) \
+	dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
+
+static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+				      u32 val)
+{
+	u32 origin = dsaf_read_reg(base, reg);
+
+	dsaf_set_field(origin, mask, shift, val);
+	dsaf_write_reg(base, reg, origin);
+}
+
+#define dsaf_set_dev_field(dev, reg, mask, shift, val) \
+	dsaf_set_reg_field((dev)->io_base, (reg), (mask), (shift), (val))
+
+#define dsaf_set_dev_bit(dev, reg, bit, val) \
+	dsaf_set_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit), (val))
+
+#define dsaf_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define dsaf_get_bit(origin, shift) \
+	dsaf_get_field((origin), (1ull << (shift)), (shift))
+
+static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+{
+	u32 origin;
+
+	origin = dsaf_read_reg(base, reg);
+	return dsaf_get_field(origin, mask, shift);
+}
+
+#define dsaf_get_dev_field(dev, reg, mask, shift) \
+	dsaf_get_reg_field((dev)->io_base, (reg), (mask), (shift))
+
+#define dsaf_get_dev_bit(dev, reg, bit) \
+	dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
+
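+/*
+ * Usage sketch (illustrative only): the *_S/*_M shift/mask pairs defined
+ * above are meant to be fed into these helpers. For example, programming
+ * the default queue id field of PPE_COM_CFG_QID_MODE_REG could look like
+ *
+ *	dsaf_set_dev_field(ppe_common, PPE_COM_CFG_QID_MODE_REG,
+ *			   PPE_CFG_QID_MODE_DEF_QID_M,
+ *			   PPE_CFG_QID_MODE_DEF_QID_S, qid);
+ *
+ * which reads the register, clears the masked bits and writes back the
+ * shifted value. "ppe_common" and "qid" are placeholders here for any
+ * handle with an io_base member and the desired queue id.
+ */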
+#define dsaf_write_b(addr, data)\
+	writeb((data), (__iomem unsigned char *)(addr))
+#define dsaf_read_b(addr)\
+	readb((__iomem unsigned char *)(addr))
+
+#define hns_mac_reg_read64(drv, offset) \
+	readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
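+/*
+ * Example (sketch): the XGMAC MIB counters are 64 bit and start at
+ * io_base + 0xc00, so a statistics refresh reads them as e.g.
+ *
+ *	hw_stats->tx_good_pkts = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS);
+ *
+ * (see hns_xgmac_update_stats() in hns_dsaf_xgmac.c).
+ */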
+
+#endif	/* _DSAF_REG_H_ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
new file mode 100644
index 0000000..dab5ecf
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -0,0 +1,837 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <asm-generic/io-64-nonatomic-hi-lo.h>
+#include <linux/of_mdio.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_xgmac.h"
+#include "hns_dsaf_reg.h"
+
+static const struct mac_stats_string g_xgmac_stats_string[] = {
+	{"xgmac_tx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(tx_fragment_err)},
+	{"xgmac_tx_good_pkts_minto64", MAC_STATS_FIELD_OFF(tx_undersize)},
+	{"xgmac_tx_total_pkts_minto64",	MAC_STATS_FIELD_OFF(tx_under_min_pkts)},
+	{"xgmac_tx_pkts_64", MAC_STATS_FIELD_OFF(tx_64bytes)},
+	{"xgmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)},
+	{"xgmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)},
+	{"xgmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)},
+	{"xgmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)},
+	{"xgmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)},
+	{"xgmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)},
+	{"xgmac_tx_good_pkts_1519tomax",
+		MAC_STATS_FIELD_OFF(tx_1519tomax_good)},
+	{"xgmac_tx_good_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_oversize)},
+	{"xgmac_tx_bad_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_jabber_err)},
+	{"xgmac_tx_good_pkts_all", MAC_STATS_FIELD_OFF(tx_good_pkts)},
+	{"xgmac_tx_good_byte_all", MAC_STATS_FIELD_OFF(tx_good_bytes)},
+	{"xgmac_tx_total_pkt", MAC_STATS_FIELD_OFF(tx_total_pkts)},
+	{"xgmac_tx_total_byt", MAC_STATS_FIELD_OFF(tx_total_bytes)},
+	{"xgmac_tx_uc_pkt", MAC_STATS_FIELD_OFF(tx_uc_pkts)},
+	{"xgmac_tx_mc_pkt", MAC_STATS_FIELD_OFF(tx_mc_pkts)},
+	{"xgmac_tx_bc_pkt", MAC_STATS_FIELD_OFF(tx_bc_pkts)},
+	{"xgmac_tx_pause_frame_num", MAC_STATS_FIELD_OFF(tx_pfc_tc0)},
+	{"xgmac_tx_pfc_per_1pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc1)},
+	{"xgmac_tx_pfc_per_2pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc2)},
+	{"xgmac_tx_pfc_per_3pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc3)},
+	{"xgmac_tx_pfc_per_4pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc4)},
+	{"xgmac_tx_pfc_per_5pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc5)},
+	{"xgmac_tx_pfc_per_6pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc6)},
+	{"xgmac_tx_pfc_per_7pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc7)},
+	{"xgmac_tx_mac_ctrol_frame", MAC_STATS_FIELD_OFF(tx_ctrl)},
+	{"xgmac_tx_1731_pkts", MAC_STATS_FIELD_OFF(tx_1731_pkts)},
+	{"xgmac_tx_1588_pkts", MAC_STATS_FIELD_OFF(tx_1588_pkts)},
+	{"xgmac_rx_good_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_good_from_sw)},
+	{"xgmac_rx_bad_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_bad_from_sw)},
+	{"xgmac_tx_bad_pkt_64tomax", MAC_STATS_FIELD_OFF(tx_bad_pkts)},
+
+	{"xgmac_rx_not_well_pkt", MAC_STATS_FIELD_OFF(rx_fragment_err)},
+	{"xgmac_rx_good_well_pkt", MAC_STATS_FIELD_OFF(rx_undersize)},
+	{"xgmac_rx_total_pkt", MAC_STATS_FIELD_OFF(rx_under_min)},
+	{"xgmac_rx_pkt_64", MAC_STATS_FIELD_OFF(rx_64bytes)},
+	{"xgmac_rx_pkt_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
+	{"xgmac_rx_pkt_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
+	{"xgmac_rx_pkt_256to511", MAC_STATS_FIELD_OFF(rx_256to511)},
+	{"xgmac_rx_pkt_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)},
+	{"xgmac_rx_pkt_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)},
+	{"xgmac_rx_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)},
+	{"xgmac_rx_good_pkt_1519tomax",	MAC_STATS_FIELD_OFF(rx_1519tomax_good)},
+	{"xgmac_rx_good_pkt_untramax", MAC_STATS_FIELD_OFF(rx_oversize)},
+	{"xgmac_rx_bad_pkt_untramax", MAC_STATS_FIELD_OFF(rx_jabber_err)},
+	{"xgmac_rx_good_pkt", MAC_STATS_FIELD_OFF(rx_good_pkts)},
+	{"xgmac_rx_good_byt", MAC_STATS_FIELD_OFF(rx_good_bytes)},
+	{"xgmac_rx_pkt", MAC_STATS_FIELD_OFF(rx_total_pkts)},
+	{"xgmac_rx_byt", MAC_STATS_FIELD_OFF(rx_total_bytes)},
+	{"xgmac_rx_uc_pkt", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
+	{"xgmac_rx_mc_pkt", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+	{"xgmac_rx_bc_pkt", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
+	{"xgmac_rx_pause_frame_num", MAC_STATS_FIELD_OFF(rx_pfc_tc0)},
+	{"xgmac_rx_pfc_per_1pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc1)},
+	{"xgmac_rx_pfc_per_2pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc2)},
+	{"xgmac_rx_pfc_per_3pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc3)},
+	{"xgmac_rx_pfc_per_4pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc4)},
+	{"xgmac_rx_pfc_per_5pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc5)},
+	{"xgmac_rx_pfc_per_6pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc6)},
+	{"xgmac_rx_pfc_per_7pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc7)},
+	{"xgmac_rx_mac_control", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)},
+	{"xgmac_tx_good_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_good_to_sw)},
+	{"xgmac_tx_bad_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_bad_to_sw)},
+	{"xgmac_rx_1731_pkt", MAC_STATS_FIELD_OFF(rx_1731_pkts)},
+	{"xgmac_rx_symbol_err_pkt", MAC_STATS_FIELD_OFF(rx_symbol_err)},
+	{"xgmac_rx_fcs_pkt", MAC_STATS_FIELD_OFF(rx_fcs_err)}
+};
+
+/**
+ *hns_xgmac_tx_enable - xgmac port tx enable
+ *@drv: mac driver
+ *@value: value of enable
+ */
+static void hns_xgmac_tx_enable(struct mac_driver *drv, u32 value)
+{
+	dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_TX_B, !!value);
+}
+
+/**
+ *hns_xgmac_rx_enable - xgmac port rx enable
+ *@drv: mac driver
+ *@value: value of enable
+ */
+static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value)
+{
+	dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_RX_B, !!value);
+}
+
+/**
+ *hns_xgmac_enable - enable xgmac port
+ *@mac_drv: mac driver
+ *@mode: mode of mac port
+ */
+static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	struct dsaf_device *dsaf_dev
+		= (struct dsaf_device *)dev_get_drvdata(drv->dev);
+	u32 port = drv->mac_id;
+
+	hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 1);
+	mdelay(10);
+
+	/* enable XGE RX/TX */
+	if (mode == MAC_COMM_MODE_TX) {
+		hns_xgmac_tx_enable(drv, 1);
+	} else if (mode == MAC_COMM_MODE_RX) {
+		hns_xgmac_rx_enable(drv, 1);
+	} else if (mode == MAC_COMM_MODE_RX_AND_TX) {
+		hns_xgmac_tx_enable(drv, 1);
+		hns_xgmac_rx_enable(drv, 1);
+	} else {
+		dev_err(drv->dev, "error mac mode:%d\n", mode);
+	}
+}
+
+/**
+ *hns_xgmac_disable - disable xgmac port
+ *@mac_drv: mac driver
+ *@mode: mode of mac port
+ */
+static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	struct dsaf_device *dsaf_dev
+		= (struct dsaf_device *)dev_get_drvdata(drv->dev);
+	u32 port = drv->mac_id;
+
+	if (mode == MAC_COMM_MODE_TX) {
+		hns_xgmac_tx_enable(drv, 0);
+	} else if (mode == MAC_COMM_MODE_RX) {
+		hns_xgmac_rx_enable(drv, 0);
+	} else if (mode == MAC_COMM_MODE_RX_AND_TX) {
+		hns_xgmac_tx_enable(drv, 0);
+		hns_xgmac_rx_enable(drv, 0);
+	}
+
+	mdelay(10);
+	hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 0);
+}
+
+/**
+ *hns_xgmac_pma_fec_enable - xgmac PMA FEC enable
+ *@drv: mac driver
+ *@tx_value: tx value
+ *@rx_value: rx value
+ */
+static void hns_xgmac_pma_fec_enable(struct mac_driver *drv, u32 tx_value,
+				     u32 rx_value)
+{
+	u32 origin = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG);
+
+	dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_TX_B, !!tx_value);
+	dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_RX_B, !!rx_value);
+	dsaf_write_dev(drv, XGMAC_PMA_FEC_CONTROL_REG, origin);
+}
+
+/* clear exception irqs for xge */
+static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en)
+{
+	u32 clr_value = 0xfffffffful;
+	u32 msk_value = en ? 0xfffffffful : 0; /* 1 is enable, 0 is disable */
+
+	dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_value);
+	dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_value);
+}
+
+/**
+ *hns_xgmac_init - initialize XGE
+ *@mac_drv: mac driver
+ */
+static void hns_xgmac_init(void *mac_drv)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	struct dsaf_device *dsaf_dev
+		= (struct dsaf_device *)dev_get_drvdata(drv->dev);
+	u32 port = drv->mac_id;
+
+	hns_dsaf_xge_srst_by_port(dsaf_dev, port, 0);
+	mdelay(100);
+	hns_dsaf_xge_srst_by_port(dsaf_dev, port, 1);
+
+	mdelay(100);
+	hns_xgmac_exc_irq_en(drv, 0);
+
+	hns_xgmac_pma_fec_enable(drv, 0x0, 0x0);
+
+	hns_xgmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
+}
+
+/**
+ *hns_xgmac_config_pad_and_crc - set xgmac pad and crc enable the same time
+ *@mac_drv: mac driver
+ *@newval:enable of pad and crc
+ */
+static void hns_xgmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	u32 origin = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+
+	dsaf_set_bit(origin, XGMAC_CTL_TX_PAD_B, !!newval);
+	dsaf_set_bit(origin, XGMAC_CTL_TX_FCS_B, !!newval);
+	dsaf_set_bit(origin, XGMAC_CTL_RX_FCS_B, !!newval);
+	dsaf_write_dev(drv, XGMAC_MAC_CONTROL_REG, origin);
+}
+
+/**
+ *hns_xgmac_pausefrm_cfg - set xgmac pause frame parameters
+ *@mac_drv: mac driver
+ *@rx_en: enable rx pause frames
+ *@tx_en: enable tx pause frames
+ */
+static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	u32 origin = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+
+	dsaf_set_bit(origin, XGMAC_PAUSE_CTL_TX_B, !!tx_en);
+	dsaf_set_bit(origin, XGMAC_PAUSE_CTL_RX_B, !!rx_en);
+	dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin);
+}
+
+static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+	u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+		| (mac_addr[3] << 16) | (mac_addr[2] << 24);
+	dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG, low_val);
+	dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG, high_val);
+}
+
+/**
+ *hns_xgmac_set_rx_ignore_pause_frames - set rx pause param about xgmac
+ *@mac_drv: mac driver
+ *@enable:enable rx pause param
+ */
+static void hns_xgmac_set_rx_ignore_pause_frames(void *mac_drv, u32 enable)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG,
+			 XGMAC_PAUSE_CTL_RX_B, !!enable);
+}
+
+/**
+ *hns_xgmac_set_tx_auto_pause_frames - set tx pause param about xgmac
+ *@mac_drv: mac driver
+ *@enable:enable tx pause param
+ */
+static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG,
+			 XGMAC_PAUSE_CTL_TX_B, !!enable);
+
+	/* if enable is not zero, set the tx pause time */
+	if (enable)
+		dsaf_write_dev(drv, XGMAC_MAC_PAUSE_TIME_REG, enable);
+}
+
+/**
+ *hns_xgmac_get_id - get xgmac port id
+ *@mac_drv: mac driver
+ *@mac_id: pointer used to return the xgmac port id
+ */
+static void hns_xgmac_get_id(void *mac_drv, u8 *mac_id)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	*mac_id = drv->mac_id;
+}
+
+/**
+ *hns_xgmac_config_max_frame_length - set xgmac max frame length
+ *@mac_drv: mac driver
+ *@newval:xgmac max frame length
+ */
+static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval);
+}
+
+void hns_xgmac_update_stats(void *mac_drv)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats;
+
+	/* TX */
+	hw_stats->tx_fragment_err
+		= hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT);
+	hw_stats->tx_undersize
+		= hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE);
+	hw_stats->tx_under_min_pkts
+		= hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN);
+	hw_stats->tx_64bytes = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS);
+	hw_stats->tx_65to127
+		= hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS);
+	hw_stats->tx_128to255
+		= hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS);
+	hw_stats->tx_256to511
+		= hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS);
+	hw_stats->tx_512to1023
+		= hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS);
+	hw_stats->tx_1024to1518
+		= hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS);
+	hw_stats->tx_1519tomax
+		= hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS);
+	hw_stats->tx_1519tomax_good
+		= hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK);
+	hw_stats->tx_oversize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE);
+	hw_stats->tx_jabber_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER);
+	hw_stats->tx_good_pkts = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS);
+	hw_stats->tx_good_bytes = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS);
+	hw_stats->tx_total_pkts = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS);
+	hw_stats->tx_total_bytes
+		= hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS);
+	hw_stats->tx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS);
+	hw_stats->tx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS);
+	hw_stats->tx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS);
+	hw_stats->tx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS);
+	hw_stats->tx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS);
+	hw_stats->tx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS);
+	hw_stats->tx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS);
+	hw_stats->tx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS);
+	hw_stats->tx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS);
+	hw_stats->tx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS);
+	hw_stats->tx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS);
+	hw_stats->tx_ctrl = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS);
+	hw_stats->tx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS);
+	hw_stats->tx_1588_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS);
+	hw_stats->rx_good_from_sw
+		= hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS);
+	hw_stats->rx_bad_from_sw
+		= hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS);
+	hw_stats->tx_bad_pkts = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS);
+
+	/* RX */
+	hw_stats->rx_fragment_err
+		= hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT);
+	hw_stats->rx_undersize
+		= hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE);
+	hw_stats->rx_under_min
+		= hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN);
+	hw_stats->rx_64bytes = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS);
+	hw_stats->rx_65to127
+		= hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS);
+	hw_stats->rx_128to255
+		= hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS);
+	hw_stats->rx_256to511
+		= hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS);
+	hw_stats->rx_512to1023
+		= hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS);
+	hw_stats->rx_1024to1518
+		= hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS);
+	hw_stats->rx_1519tomax
+		= hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS);
+	hw_stats->rx_1519tomax_good
+		= hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK);
+	hw_stats->rx_oversize = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE);
+	hw_stats->rx_jabber_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER);
+	hw_stats->rx_good_pkts = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS);
+	hw_stats->rx_good_bytes = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS);
+	hw_stats->rx_total_pkts = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS);
+	hw_stats->rx_total_bytes
+		= hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS);
+	hw_stats->rx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS);
+	hw_stats->rx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS);
+	hw_stats->rx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS);
+	hw_stats->rx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS);
+	hw_stats->rx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS);
+	hw_stats->rx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS);
+	hw_stats->rx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS);
+	hw_stats->rx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS);
+	hw_stats->rx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS);
+	hw_stats->rx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS);
+	hw_stats->rx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS);
+
+	hw_stats->rx_unknown_ctrl
+		= hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS);
+	hw_stats->tx_good_to_sw
+		= hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS);
+	hw_stats->tx_bad_to_sw
+		= hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS);
+	hw_stats->rx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS);
+	hw_stats->rx_symbol_err
+		= hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS);
+	hw_stats->rx_fcs_err = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS);
+}
+
+/**
+ *hns_xgmac_free - free xgmac driver
+ *@mac_drv: mac driver
+ */
+static void hns_xgmac_free(void *mac_drv)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	struct dsaf_device *dsaf_dev
+		= (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+	u32 mac_id = drv->mac_id;
+
+	hns_dsaf_xge_srst_by_port(dsaf_dev, mac_id, 0);
+}
+
+/**
+ *hns_xgmac_get_info - get xgmac information
+ *@mac_drv: mac driver
+ *@mac_info:mac information
+ */
+static void hns_xgmac_get_info(void *mac_drv, struct mac_info *mac_info)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	u32 pause_time, pause_ctrl, port_mode, ctrl_val;
+
+	ctrl_val = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+	mac_info->pad_and_crc_en = dsaf_get_bit(ctrl_val, XGMAC_CTL_TX_PAD_B);
+	mac_info->auto_neg = 0;
+
+	pause_time = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG);
+	mac_info->tx_pause_time = pause_time;
+
+	port_mode = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG);
+	mac_info->port_en = dsaf_get_field(port_mode, XGMAC_PORT_MODE_TX_M,
+					   XGMAC_PORT_MODE_TX_S) &&
+				dsaf_get_field(port_mode, XGMAC_PORT_MODE_RX_M,
+					       XGMAC_PORT_MODE_RX_S);
+	mac_info->duplex = 1;
+	mac_info->speed = MAC_SPEED_10000;
+
+	pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+	mac_info->rx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
+	mac_info->tx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
+}
+
+/**
+ *hns_xgmac_get_pausefrm_cfg - get xgmac pause param
+ *@mac_drv: mac driver
+ *@rx_en:xgmac rx pause enable
+ *@tx_en:xgmac tx pause enable
+ */
+static void hns_xgmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_en, u32 *tx_en)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	u32 pause_ctrl;
+
+	pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+	*rx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
+	*tx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
+}
+
+/**
+ *hns_xgmac_get_link_status - get xgmac link status
+ *@mac_drv: mac driver
+ *@link_stat: xgmac link stat
+ */
+static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat)
+{
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	*link_stat = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG);
+}
+
+/**
+ *hns_xgmac_get_regs - dump xgmac regs
+ *@mac_drv: mac driver
+ *@data: buffer for the register values
+ */
+static void hns_xgmac_get_regs(void *mac_drv, void *data)
+{
+	u32 i = 0;
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	u32 *regs = data;
+	u64 qtmp;
+
+	/* base config registers */
+	regs[0] = dsaf_read_dev(drv, XGMAC_INT_STATUS_REG);
+	regs[1] = dsaf_read_dev(drv, XGMAC_INT_ENABLE_REG);
+	regs[2] = dsaf_read_dev(drv, XGMAC_INT_SET_REG);
+	regs[3] = dsaf_read_dev(drv, XGMAC_IERR_U_INFO_REG);
+	regs[4] = dsaf_read_dev(drv, XGMAC_OVF_INFO_REG);
+	regs[5] = dsaf_read_dev(drv, XGMAC_OVF_CNT_REG);
+	regs[6] = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG);
+	regs[7] = dsaf_read_dev(drv, XGMAC_CLK_ENABLE_REG);
+	regs[8] = dsaf_read_dev(drv, XGMAC_RESET_REG);
+	regs[9] = dsaf_read_dev(drv, XGMAC_LINK_CONTROL_REG);
+	regs[10] = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG);
+
+	regs[11] = dsaf_read_dev(drv, XGMAC_SPARE_REG);
+	regs[12] = dsaf_read_dev(drv, XGMAC_SPARE_CNT_REG);
+	regs[13] = dsaf_read_dev(drv, XGMAC_MAC_ENABLE_REG);
+	regs[14] = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+	regs[15] = dsaf_read_dev(drv, XGMAC_MAC_IPG_REG);
+	regs[16] = dsaf_read_dev(drv, XGMAC_MAC_MSG_CRC_EN_REG);
+	regs[17] = dsaf_read_dev(drv, XGMAC_MAC_MSG_IMG_REG);
+	regs[18] = dsaf_read_dev(drv, XGMAC_MAC_MSG_FC_CFG_REG);
+	regs[19] = dsaf_read_dev(drv, XGMAC_MAC_MSG_TC_CFG_REG);
+	regs[20] = dsaf_read_dev(drv, XGMAC_MAC_PAD_SIZE_REG);
+	regs[21] = dsaf_read_dev(drv, XGMAC_MAC_MIN_PKT_SIZE_REG);
+	regs[22] = dsaf_read_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG);
+	regs[23] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+	regs[24] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG);
+	regs[25] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_GAP_REG);
+	regs[26] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG);
+	regs[27] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG);
+	regs[28] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_H_REG);
+	regs[29] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_L_REG);
+	regs[30] = dsaf_read_dev(drv, XGMAC_MAC_PFC_PRI_EN_REG);
+	regs[31] = dsaf_read_dev(drv, XGMAC_MAC_1588_CTRL_REG);
+	regs[32] = dsaf_read_dev(drv, XGMAC_MAC_1588_TX_PORT_DLY_REG);
+	regs[33] = dsaf_read_dev(drv, XGMAC_MAC_1588_RX_PORT_DLY_REG);
+	regs[34] = dsaf_read_dev(drv, XGMAC_MAC_1588_ASYM_DLY_REG);
+	regs[35] = dsaf_read_dev(drv, XGMAC_MAC_1588_ADJUST_CFG_REG);
+
+	regs[36] = dsaf_read_dev(drv, XGMAC_MAC_Y1731_ETH_TYPE_REG);
+	regs[37] = dsaf_read_dev(drv, XGMAC_MAC_MIB_CONTROL_REG);
+	regs[38] = dsaf_read_dev(drv, XGMAC_MAC_WAN_RATE_ADJUST_REG);
+	regs[39] = dsaf_read_dev(drv, XGMAC_MAC_TX_ERR_MARK_REG);
+	regs[40] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG);
+	regs[41] = dsaf_read_dev(drv, XGMAC_MAC_RX_LF_RF_STATUS_REG);
+	regs[42] = dsaf_read_dev(drv, XGMAC_MAC_TX_RUNT_PKT_CNT_REG);
+	regs[43] = dsaf_read_dev(drv, XGMAC_MAC_RX_RUNT_PKT_CNT_REG);
+	regs[44] = dsaf_read_dev(drv, XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG);
+	regs[45] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG);
+	regs[46] = dsaf_read_dev(drv, XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG);
+	regs[47] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_MSG_CNT_REG);
+	regs[48] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_EFD_CNT_REG);
+	regs[49] = dsaf_read_dev(drv, XGMAC_MAC_ERR_INFO_REG);
+	regs[50] = dsaf_read_dev(drv, XGMAC_MAC_DBG_INFO_REG);
+
+	regs[51] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SYNC_THD_REG);
+	regs[52] = dsaf_read_dev(drv, XGMAC_PCS_STATUS1_REG);
+	regs[53] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS1_REG);
+	regs[54] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS2_REG);
+	regs[55] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_0_REG);
+	regs[56] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_1_REG);
+	regs[57] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_0_REG);
+	regs[58] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_1_REG);
+	regs[59] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_CONTROL_REG);
+	regs[60] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_ERR_CNT_REG);
+	regs[61] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO_REG);
+	regs[62] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO1_REG);
+	regs[63] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO2_REG);
+	regs[64] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO3_REG);
+
+	regs[65] = dsaf_read_dev(drv, XGMAC_PMA_ENABLE_REG);
+	regs[66] = dsaf_read_dev(drv, XGMAC_PMA_CONTROL_REG);
+	regs[67] = dsaf_read_dev(drv, XGMAC_PMA_SIGNAL_STATUS_REG);
+	regs[68] = dsaf_read_dev(drv, XGMAC_PMA_DBG_INFO_REG);
+	regs[69] = dsaf_read_dev(drv, XGMAC_PMA_FEC_ABILITY_REG);
+	regs[70] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG);
+	regs[71] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG);
+	regs[72] = dsaf_read_dev(drv, XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG);
+
+	/* status registers */
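+	/* each 64-bit counter is copied into two u32 dump slots, low word first */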
+#define hns_xgmac_cpy_q(p, q) \
+	do {\
+		*(p) = (u32)(q);\
+		*((p) + 1) = (u32)((q) >> 32);\
+	} while (0)
+
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT);
+	hns_xgmac_cpy_q(&regs[73], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE);
+	hns_xgmac_cpy_q(&regs[75], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN);
+	hns_xgmac_cpy_q(&regs[77], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS);
+	hns_xgmac_cpy_q(&regs[79], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS);
+	hns_xgmac_cpy_q(&regs[81], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS);
+	hns_xgmac_cpy_q(&regs[83], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS);
+	hns_xgmac_cpy_q(&regs[85], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS);
+	hns_xgmac_cpy_q(&regs[87], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS);
+	hns_xgmac_cpy_q(&regs[89], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS);
+	hns_xgmac_cpy_q(&regs[91], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK);
+	hns_xgmac_cpy_q(&regs[93], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE);
+	hns_xgmac_cpy_q(&regs[95], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER);
+	hns_xgmac_cpy_q(&regs[97], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS);
+	hns_xgmac_cpy_q(&regs[99], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS);
+	hns_xgmac_cpy_q(&regs[101], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS);
+	hns_xgmac_cpy_q(&regs[103], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS);
+	hns_xgmac_cpy_q(&regs[105], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS);
+	hns_xgmac_cpy_q(&regs[107], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS);
+	hns_xgmac_cpy_q(&regs[109], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS);
+	hns_xgmac_cpy_q(&regs[111], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[113], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[115], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[117], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[119], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[121], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[123], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[125], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[127], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS);
+	hns_xgmac_cpy_q(&regs[129], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS);
+	hns_xgmac_cpy_q(&regs[131], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS);
+	hns_xgmac_cpy_q(&regs[133], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS);
+	hns_xgmac_cpy_q(&regs[135], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS);
+	hns_xgmac_cpy_q(&regs[137], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS);
+	hns_xgmac_cpy_q(&regs[139], qtmp);
+
+	/* RX */
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT);
+	hns_xgmac_cpy_q(&regs[141], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE);
+	hns_xgmac_cpy_q(&regs[143], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN);
+	hns_xgmac_cpy_q(&regs[145], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS);
+	hns_xgmac_cpy_q(&regs[147], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS);
+	hns_xgmac_cpy_q(&regs[149], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS);
+	hns_xgmac_cpy_q(&regs[151], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS);
+	hns_xgmac_cpy_q(&regs[153], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS);
+	hns_xgmac_cpy_q(&regs[155], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS);
+	hns_xgmac_cpy_q(&regs[157], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS);
+	hns_xgmac_cpy_q(&regs[159], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK);
+	hns_xgmac_cpy_q(&regs[161], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE);
+	hns_xgmac_cpy_q(&regs[163], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER);
+	hns_xgmac_cpy_q(&regs[165], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS);
+	hns_xgmac_cpy_q(&regs[167], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS);
+	hns_xgmac_cpy_q(&regs[169], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS);
+	hns_xgmac_cpy_q(&regs[171], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS);
+	hns_xgmac_cpy_q(&regs[173], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS);
+	hns_xgmac_cpy_q(&regs[175], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS);
+	hns_xgmac_cpy_q(&regs[177], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS);
+	hns_xgmac_cpy_q(&regs[179], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[181], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[183], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[185], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[187], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[189], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[191], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[193], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS);
+	hns_xgmac_cpy_q(&regs[195], qtmp);
+
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS);
+	hns_xgmac_cpy_q(&regs[197], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS);
+	hns_xgmac_cpy_q(&regs[199], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS);
+	hns_xgmac_cpy_q(&regs[201], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS);
+	hns_xgmac_cpy_q(&regs[203], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS);
+	hns_xgmac_cpy_q(&regs[205], qtmp);
+	qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS);
+	hns_xgmac_cpy_q(&regs[207], qtmp);
+
+	/* mark end of mac regs */
+	for (i = 208; i < 214; i++)
+		regs[i] = 0xaaaaaaaa;
+}
+
+/**
+ *hns_xgmac_get_stats - get xgmac statistic
+ *@mac_drv: mac driver
+ *@data:data for value of stats regs
+ */
+static void hns_xgmac_get_stats(void *mac_drv, u64 *data)
+{
+	u32 i;
+	u64 *buf = data;
+	struct mac_driver *drv = (struct mac_driver *)mac_drv;
+	struct mac_hw_stats *hw_stats = NULL;
+
+	hw_stats = &drv->mac_cb->hw_stats;
+
+	for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) {
+		buf[i] = DSAF_STATS_READ(hw_stats,
+			g_xgmac_stats_string[i].offset);
+	}
+}
+
+/**
+ *hns_xgmac_get_strings - get xgmac strings name
+ *@stringset: type of values in data
+ *@data:data for value of string name
+ */
+static void hns_xgmac_get_strings(u32 stringset, u8 *data)
+{
+	char *buff = (char *)data;
+	u32 i;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) {
+		snprintf(buff, ETH_GSTRING_LEN, "%s", g_xgmac_stats_string[i].desc);
+		buff = buff + ETH_GSTRING_LEN;
+	}
+}
+
+/**
+ *hns_xgmac_get_sset_count - get xgmac string set count
+ *@stringset: type of values in data
+ *return xgmac string set count
+ */
+static int hns_xgmac_get_sset_count(int stringset)
+{
+	if (stringset == ETH_SS_STATS)
+		return ARRAY_SIZE(g_xgmac_stats_string);
+
+	return 0;
+}
+
+/**
+ *hns_xgmac_get_regs_count - get xgmac regs count
+ *return xgmac regs count
+ */
+static int hns_xgmac_get_regs_count(void)
+{
+	return ETH_XGMAC_DUMP_NUM;
+}
+
+void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
+{
+	struct mac_driver *mac_drv;
+
+	mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL);
+	if (!mac_drv)
+		return NULL;
+
+	mac_drv->mac_init = hns_xgmac_init;
+	mac_drv->mac_enable = hns_xgmac_enable;
+	mac_drv->mac_disable = hns_xgmac_disable;
+
+	mac_drv->mac_id = mac_param->mac_id;
+	mac_drv->mac_mode = mac_param->mac_mode;
+	mac_drv->io_base = mac_param->vaddr;
+	mac_drv->dev = mac_param->dev;
+	mac_drv->mac_cb = mac_cb;
+
+	mac_drv->set_mac_addr = hns_xgmac_set_pausefrm_mac_addr;
+	mac_drv->set_an_mode = NULL;
+	mac_drv->config_loopback = NULL;
+	mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc;
+	mac_drv->config_half_duplex = NULL;
+	mac_drv->set_rx_ignore_pause_frames =
+		hns_xgmac_set_rx_ignore_pause_frames;
+	mac_drv->mac_get_id = hns_xgmac_get_id;
+	mac_drv->mac_free = hns_xgmac_free;
+	mac_drv->adjust_link = NULL;
+	mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames;
+	mac_drv->config_max_frame_length = hns_xgmac_config_max_frame_length;
+	mac_drv->mac_pausefrm_cfg = hns_xgmac_pausefrm_cfg;
+	mac_drv->autoneg_stat = NULL;
+	mac_drv->get_info = hns_xgmac_get_info;
+	mac_drv->get_pause_enable = hns_xgmac_get_pausefrm_cfg;
+	mac_drv->get_link_status = hns_xgmac_get_link_status;
+	mac_drv->get_regs = hns_xgmac_get_regs;
+	mac_drv->get_ethtool_stats = hns_xgmac_get_stats;
+	mac_drv->get_sset_count = hns_xgmac_get_sset_count;
+	mac_drv->get_regs_count = hns_xgmac_get_regs_count;
+	mac_drv->get_strings = hns_xgmac_get_strings;
+	mac_drv->update_stats = hns_xgmac_update_stats;
+
+	return (void *)mac_drv;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
new file mode 100644
index 0000000..139f729
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_XGMAC_H
+#define _HNS_XGMAC_H
+
+#define ETH_XGMAC_DUMP_NUM		(214)
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
new file mode 100644
index 0000000..ce7f2e0
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -0,0 +1,1629 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+
+#include "hnae.h"
+#include "hns_enet.h"
+
+#define NIC_MAX_Q_PER_VF 16
+#define HNS_NIC_TX_TIMEOUT (5 * HZ)
+
+#define SERVICE_TIMER_HZ (1 * HZ)
+
+#define NIC_TX_CLEAN_MAX_NUM 256
+#define NIC_RX_CLEAN_MAX_NUM 64
+
+#define RCB_IRQ_NOT_INITED 0
+#define RCB_IRQ_INITED 1
+
+static void fill_desc(struct hnae_ring *ring, void *priv,
+		      int size, dma_addr_t dma, int frag_end,
+		      int buf_num, enum hns_desc_type type)
+{
+	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
+	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+	struct sk_buff *skb;
+	__be16 protocol;
+	u32 ip_offset;
+	u32 asid_bufnum_pid = 0;
+	u32 flag_ipoffset = 0;
+
+	desc_cb->priv = priv;
+	desc_cb->length = size;
+	desc_cb->dma = dma;
+	desc_cb->type = type;
+
+	desc->addr = cpu_to_le64(dma);
+	desc->tx.send_size = cpu_to_le16((u16)size);
+
+	/*config bd buffer end */
+	flag_ipoffset |= 1 << HNS_TXD_VLD_B;
+
+	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;
+
+	if (type == DESC_TYPE_SKB) {
+		skb = (struct sk_buff *)priv;
+
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			protocol = skb->protocol;
+			ip_offset = ETH_HLEN;
+
+			/* if it is a SW VLAN, check the next protocol */
+			if (protocol == htons(ETH_P_8021Q)) {
+				ip_offset += VLAN_HLEN;
+				protocol = vlan_get_protocol(skb);
+				skb->protocol = protocol;
+			}
+
+			if (skb->protocol == htons(ETH_P_IP)) {
+				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
+				/* check for tcp/udp header */
+				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
+
+			} else if (skb->protocol == htons(ETH_P_IPV6)) {
+				/* ipv6 has no l3 checksum; only check the L4 header */
+				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
+			}
+
+			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
+		}
+	}
+
+	flag_ipoffset |= frag_end << HNS_TXD_FE_B;
+
+	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
+	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);
+
+	ring_ptr_move_fw(ring, next_to_use);
+}
+
+static void unfill_desc(struct hnae_ring *ring)
+{
+	ring_ptr_move_bw(ring, next_to_use);
+}
+
+int hns_nic_net_xmit_hw(struct net_device *ndev,
+			struct sk_buff *skb,
+			struct hns_nic_ring_data *ring_data)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct device *dev = priv->dev;
+	struct hnae_ring *ring = ring_data->ring;
+	struct netdev_queue *dev_queue;
+	struct skb_frag_struct *frag;
+	int buf_num;
+	dma_addr_t dma;
+	int size, next_to_use;
+	int i, j;
+	struct sk_buff *new_skb;
+
+	assert(ring->max_desc_num_per_pkt <= ring->desc_num);
+
+	/* no. of segments (plus a header) */
+	buf_num = skb_shinfo(skb)->nr_frags + 1;
+
+	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
+		if (ring_space(ring) < 1) {
+			ring->stats.tx_busy++;
+			goto out_net_tx_busy;
+		}
+
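+		/* too many frags: copy the skb into one linear buffer */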
+		new_skb = skb_copy(skb, GFP_ATOMIC);
+		if (!new_skb) {
+			ring->stats.sw_err_cnt++;
+			netdev_err(ndev, "no memory to xmit!\n");
+			goto out_err_tx_ok;
+		}
+
+		dev_kfree_skb_any(skb);
+		skb = new_skb;
+		buf_num = 1;
+		assert(skb_shinfo(skb)->nr_frags == 1);
+	} else if (buf_num > ring_space(ring)) {
+		ring->stats.tx_busy++;
+		goto out_net_tx_busy;
+	}
+	next_to_use = ring->next_to_use;
+
+	/* fill the first part */
+	size = skb_headlen(skb);
+	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma)) {
+		netdev_err(ndev, "TX head DMA map failed\n");
+		ring->stats.sw_err_cnt++;
+		goto out_err_tx_ok;
+	}
+	fill_desc(ring, skb, size, dma, buf_num == 1 ? 1 : 0, buf_num,
+		  DESC_TYPE_SKB);
+
+	/* fill the fragments */
+	for (i = 1; i < buf_num; i++) {
+		frag = &skb_shinfo(skb)->frags[i - 1];
+		size = skb_frag_size(frag);
+		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma)) {
+			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
+			ring->stats.sw_err_cnt++;
+			goto out_map_frag_fail;
+		}
+		fill_desc(ring, skb_frag_page(frag), size, dma,
+			  buf_num - 1 == i ? 1 : 0, buf_num, DESC_TYPE_PAGE);
+	}
+
+	/* the whole packet has been translated into descriptors */
+	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
+	netdev_tx_sent_queue(dev_queue, skb->len);
+
+	wmb(); /* commit all data before submit */
+	assert(skb->queue_mapping < priv->ae_handle->q_num);
+	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
+	ring->stats.tx_pkts++;
+	ring->stats.tx_bytes += skb->len;
+
+	return NETDEV_TX_OK;
+
+out_map_frag_fail:
+
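+	/* unwind: unmap the already-mapped fragments, then the head */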
+	for (j = i - 1; j > 0; j--) {
+		unfill_desc(ring);
+		next_to_use = ring->next_to_use;
+		dma_unmap_page(dev, ring->desc_cb[next_to_use].dma,
+			       ring->desc_cb[next_to_use].length,
+			       DMA_TO_DEVICE);
+	}
+
+	unfill_desc(ring);
+	next_to_use = ring->next_to_use;
+	dma_unmap_single(dev, ring->desc_cb[next_to_use].dma,
+			 ring->desc_cb[next_to_use].length, DMA_TO_DEVICE);
+
+out_err_tx_ok:
+
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+
+out_net_tx_busy:
+
+	netif_stop_subqueue(ndev, skb->queue_mapping);
+
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it.
+	 */
+	smp_mb();
+	return NETDEV_TX_BUSY;
+}
+
+/**
+ * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
+ * @data: pointer to the start of the headers
+ * @flag: rx descriptor flags carrying the vlan/l3/l4 type fields
+ * @max_size: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
+					unsigned int max_size)
+{
+	unsigned char *network;
+	u8 hlen;
+
+	/* this should never happen, but better safe than sorry */
+	if (max_size < ETH_HLEN)
+		return max_size;
+
+	/* initialize network frame pointer */
+	network = data;
+
+	/* set first protocol and move network header forward */
+	network += ETH_HLEN;
+
+	/* handle any vlan tag if present */
+	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
+		== HNS_RX_FLAG_VLAN_PRESENT) {
+		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
+			return max_size;
+
+		network += VLAN_HLEN;
+	}
+
+	/* handle L3 protocols */
+	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
+		== HNS_RX_FLAG_L3ID_IPV4) {
+		if ((typeof(max_size))(network - data) >
+		    (max_size - sizeof(struct iphdr)))
+			return max_size;
+
+		/* access ihl as a u8 to avoid unaligned access on ia64 */
+		hlen = (network[0] & 0x0F) << 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct iphdr))
+			return network - data;
+
+		/* record next protocol if header is present */
+	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
+		== HNS_RX_FLAG_L3ID_IPV6) {
+		if ((typeof(max_size))(network - data) >
+		    (max_size - sizeof(struct ipv6hdr)))
+			return max_size;
+
+		/* record next protocol */
+		hlen = sizeof(struct ipv6hdr);
+	} else {
+		return network - data;
+	}
+
+	/* relocate pointer to start of L4 header */
+	network += hlen;
+
+	/* finally sort out TCP/UDP */
+	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
+		== HNS_RX_FLAG_L4ID_TCP) {
+		if ((typeof(max_size))(network - data) >
+		    (max_size - sizeof(struct tcphdr)))
+			return max_size;
+
+		/* access doff as a u8 to avoid unaligned access on ia64 */
+		hlen = (network[12] & 0xF0) >> 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct tcphdr))
+			return network - data;
+
+		network += hlen;
+	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
+		== HNS_RX_FLAG_L4ID_UDP) {
+		if ((typeof(max_size))(network - data) >
+		    (max_size - sizeof(struct udphdr)))
+			return max_size;
+
+		network += sizeof(struct udphdr);
+	}
+
+	/* If everything has gone correctly network should be the
+	 * data section of the packet and will be the end of the header.
+	 * If not then it probably represents the end of the last recognized
+	 * header.
+	 */
+	if ((typeof(max_size))(network - data) < max_size)
+		return network - data;
+	else
+		return max_size;
+}
+
+static void
+hns_nic_reuse_page(struct hnae_desc_cb *desc_cb, int tsize, int last_offset)
+{
+	/* avoid re-using remote NUMA pages; reuse_flag defaults to no reuse */
+	if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) {
+		/* move offset up to the next cache line */
+		desc_cb->page_offset += tsize;
+
+		if (desc_cb->page_offset <= last_offset) {
+			desc_cb->reuse_flag = 1;
+			/* bump ref count on page before it is given*/
+			get_page(desc_cb->priv);
+		}
+	}
+}
+
+static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
+			       struct sk_buff **out_skb, int *out_bnum)
+{
+	struct hnae_ring *ring = ring_data->ring;
+	struct net_device *ndev = ring_data->napi.dev;
+	struct sk_buff *skb;
+	struct hnae_desc *desc;
+	struct hnae_desc_cb *desc_cb;
+	unsigned char *va;
+	int bnum, length, size, i, truesize, last_offset;
+	int pull_len;
+	u32 bnum_flag;
+
+	last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+	desc = &ring->desc[ring->next_to_clean];
+	desc_cb = &ring->desc_cb[ring->next_to_clean];
+	length = le16_to_cpu(desc->rx.pkt_len);
+	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+	bnum = hnae_get_field(bnum_flag, HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
+	*out_bnum = bnum;
+	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+
+	skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE);
+	if (unlikely(!skb)) {
+		netdev_err(ndev, "alloc rx skb fail\n");
+		ring->stats.sw_err_cnt++;
+		return -ENOMEM;
+	}
+
+	if (length <= HNS_RX_HEAD_SIZE) {
+		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+		/* we can reuse buffer as-is, just make sure it is local */
+		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+			desc_cb->reuse_flag = 1;
+		else /* this page cannot be reused so discard it */
+			put_page(desc_cb->priv);
+
+		ring_ptr_move_fw(ring, next_to_clean);
+
+		if (unlikely(bnum != 1)) { /* check err*/
+			*out_bnum = 1;
+			goto out_bnum_err;
+		}
+	} else {
+		ring->stats.seg_pkt_cnt++;
+
+		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
+		memcpy(__skb_put(skb, pull_len), va,
+		       ALIGN(pull_len, sizeof(long)));
+
+		size = le16_to_cpu(desc->rx.size);
+		truesize = ALIGN(size, L1_CACHE_BYTES);
+		skb_add_rx_frag(skb, 0, desc_cb->priv,
+				desc_cb->page_offset + pull_len,
+				size - pull_len, truesize - pull_len);
+
+		hns_nic_reuse_page(desc_cb, truesize, last_offset);
+		ring_ptr_move_fw(ring, next_to_clean);
+
+		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err*/
+			*out_bnum = 1;
+			goto out_bnum_err;
+		}
+		for (i = 1; i < bnum; i++) {
+			desc = &ring->desc[ring->next_to_clean];
+			desc_cb = &ring->desc_cb[ring->next_to_clean];
+			size = le16_to_cpu(desc->rx.size);
+			truesize = ALIGN(size, L1_CACHE_BYTES);
+			skb_add_rx_frag(skb, i, desc_cb->priv,
+					desc_cb->page_offset,
+					size, truesize);
+
+			hns_nic_reuse_page(desc_cb, truesize, last_offset);
+			ring_ptr_move_fw(ring, next_to_clean);
+		}
+	}
+
+	/* check except process, free skb and jump the desc */
+	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
+out_bnum_err:
+		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
+		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
+			   bnum, ring->max_desc_num_per_pkt,
+			   length, (int)MAX_SKB_FRAGS,
+			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
+		ring->stats.err_bd_num++;
+		dev_kfree_skb_any(skb);
+		return -EDOM;
+	}
+
+	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+
+	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
+		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
+			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
+		ring->stats.non_vld_descs++;
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
+
+	if (unlikely((!desc->rx.pkt_len) ||
+		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
+		ring->stats.err_pkt_len++;
+		dev_kfree_skb_any(skb);
+		return -EFAULT;
+	}
+
+	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
+		ring->stats.l2_err++;
+		dev_kfree_skb_any(skb);
+		return -EFAULT;
+	}
+
+	ring->stats.rx_pkts++;
+	ring->stats.rx_bytes += skb->len;
+
+	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
+		     hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
+		ring->stats.l3l4_csum_err++;
+		return 0;
+	}
+
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	return 0;
+}
+
+static void
+hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
+{
+	int i, ret;
+	struct hnae_desc_cb res_cbs;
+	struct hnae_desc_cb *desc_cb;
+	struct hnae_ring *ring = ring_data->ring;
+	struct net_device *ndev = ring_data->napi.dev;
+
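+	/* refill the ring: reuse local pages when possible, else map new buffers */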
+	for (i = 0; i < cleand_count; i++) {
+		desc_cb = &ring->desc_cb[ring->next_to_use];
+		if (desc_cb->reuse_flag) {
+			ring->stats.reuse_pg_cnt++;
+			hnae_reuse_buffer(ring, ring->next_to_use);
+		} else {
+			ret = hnae_reserve_buffer_map(ring, &res_cbs);
+			if (ret) {
+				ring->stats.sw_err_cnt++;
+				netdev_err(ndev, "hnae reserve buffer map failed.\n");
+				break;
+			}
+			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
+		}
+
+		ring_ptr_move_fw(ring, next_to_use);
+	}
+
+	wmb(); /* make sure all data has been written before submit */
+	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
+}
+
+/* return error number for error or number of desc left to take
+ */
+static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
+			      struct sk_buff *skb)
+{
+	struct net_device *ndev = ring_data->napi.dev;
+
+	skb->protocol = eth_type_trans(skb, ndev);
+	(void)napi_gro_receive(&ring_data->napi, skb);
+	ndev->last_rx = jiffies;
+}
+
+static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
+			       int budget, void *v)
+{
+	struct hnae_ring *ring = ring_data->ring;
+	struct sk_buff *skb;
+	int num, bnum, ex_num;
+#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
+	int recv_pkts, recv_bds, clean_count, err;
+
+	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+	rmb(); /* make sure num taken effect before the other data is touched */
+
+	recv_pkts = 0, recv_bds = 0, clean_count = 0;
+recv:
+	while (recv_pkts < budget && recv_bds < num) {
+		/* reuse or realloc buffers*/
+		if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+			hns_nic_alloc_rx_buffers(ring_data, clean_count);
+			clean_count = 0;
+		}
+
+		/* poll one packet */
+		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
+		if (unlikely(!skb)) /* this fault cannot be repaired */
+			break;
+
+		recv_bds += bnum;
+		clean_count += bnum;
+		if (unlikely(err)) {  /* skip the erroneous packet */
+			recv_pkts++;
+			continue;
+		}
+
+		/* hand the packet to the ip stack */
+		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
+							ring_data, skb);
+		recv_pkts++;
+	}
+
+	/* make sure all data has been written before submit */
+	if (clean_count > 0) {
+		hns_nic_alloc_rx_buffers(ring_data, clean_count);
+		clean_count = 0;
+	}
+
+	if (recv_pkts < budget) {
+		ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+		rmb(); /* complete reading the rx ring bd number */
+		if (ex_num > 0) {
+			num += ex_num;
+			goto recv;
+		}
+	}
+
+	return recv_pkts;
+}
+
+static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
+{
+	struct hnae_ring *ring = ring_data->ring;
+	int num = 0;
+
+	/* workaround for a hardware bug */
+	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+
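+	/* rx bds are still pending after the poll; reschedule napi */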
+	if (num > 0) {
+		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+			ring_data->ring, 1);
+
+		napi_schedule(&ring_data->napi);
+	}
+}
+
+static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
+					    int *bytes, int *pkts)
+{
+	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+	(*bytes) += desc_cb->length;
+	/* desc_cb will be cleaned after hnae_free_buffer_detach */
+	hnae_free_buffer_detach(ring, ring->next_to_clean);
+
+	ring_ptr_move_fw(ring, next_to_clean);
+}
+
+static int is_valid_clean_head(struct hnae_ring *ring, int h)
+{
+	int u = ring->next_to_use;
+	int c = ring->next_to_clean;
+
+	if (unlikely(h > ring->desc_num))
+		return 0;
+
+	assert(u > 0 && u < ring->desc_num);
+	assert(c > 0 && c < ring->desc_num);
+	assert(u != c && h != c); /* must be checked before call this func */
+
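+	/* head is valid only if it lies in (next_to_clean, next_to_use],
+	 * allowing for ring wrap-around
+	 */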
+	return u > c ? (h > c && h <= u) : (h > c || h <= u);
+}
+
+/* netif_tx_lock degrades performance; enable it only when necessary */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
+#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
+#else
+#define NETIF_TX_LOCK(ndev)
+#define NETIF_TX_UNLOCK(ndev)
+#endif
+/* reclaim all desc in one budget
+ * return error or number of desc left
+ */
+static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
+			       int budget, void *v)
+{
+	struct hnae_ring *ring = ring_data->ring;
+	struct net_device *ndev = ring_data->napi.dev;
+	struct netdev_queue *dev_queue;
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	int head;
+	int bytes, pkts;
+
+	NETIF_TX_LOCK(ndev);
+
+	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+	rmb(); /* make sure head is ready before touch any data */
+
+	if (is_ring_empty(ring) || head == ring->next_to_clean) {
+		NETIF_TX_UNLOCK(ndev);
+		return 0; /* no data to poll */
+	}
+
+	if (!is_valid_clean_head(ring, head)) {
+		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
+			   ring->next_to_use, ring->next_to_clean);
+		ring->stats.io_err_cnt++;
+		NETIF_TX_UNLOCK(ndev);
+		return -EIO;
+	}
+
+	bytes = 0;
+	pkts = 0;
+	while (head != ring->next_to_clean)
+		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
+
+	NETIF_TX_UNLOCK(ndev);
+
+	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+	netdev_tx_completed_queue(dev_queue, pkts, bytes);
+
+	if (unlikely(pkts && netif_carrier_ok(ndev) &&
+		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (netif_tx_queue_stopped(dev_queue) &&
+		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
+			netif_tx_wake_queue(dev_queue);
+			ring->stats.restart_queue++;
+		}
+	}
+	return 0;
+}
+
+static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
+{
+	struct hnae_ring *ring = ring_data->ring;
+	int head = ring->next_to_clean;
+
+	/* workaround for a hardware bug */
+	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+
+	if (head != ring->next_to_clean) {
+		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+			ring_data->ring, 1);
+
+		napi_schedule(&ring_data->napi);
+	}
+}
+
+static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
+{
+	struct hnae_ring *ring = ring_data->ring;
+	struct net_device *ndev = ring_data->napi.dev;
+	struct netdev_queue *dev_queue;
+	int head;
+	int bytes, pkts;
+
+	NETIF_TX_LOCK(ndev);
+
+	head = ring->next_to_use; /* ntu: ring position set by software */
+	bytes = 0;
+	pkts = 0;
+	while (head != ring->next_to_clean)
+		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
+
+	NETIF_TX_UNLOCK(ndev);
+
+	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+	netdev_tx_reset_queue(dev_queue);
+}
+
+static int hns_nic_common_poll(struct napi_struct *napi, int budget)
+{
+	struct hns_nic_ring_data *ring_data =
+		container_of(napi, struct hns_nic_ring_data, napi);
+	int clean_complete = ring_data->poll_one(
+				ring_data, budget, ring_data->ex_process);
+
+	if (clean_complete >= 0 && clean_complete < budget) {
+		napi_complete(napi);
+		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+			ring_data->ring, 0);
+
+		ring_data->fini_process(ring_data);
+	}
+
+	return clean_complete;
+}
+
+static irqreturn_t hns_irq_handle(int irq, void *dev)
+{
+	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;
+
+	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+		ring_data->ring, 1);
+	napi_schedule(&ring_data->napi);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ *hns_nic_adjust_link - adjust the network mode by the phy state or new parameters
+ *@ndev: net device
+ */
+static void hns_nic_adjust_link(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+
+	h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex);
+}
+
+/**
+ *hns_nic_init_phy - init phy
+ *@ndev: net device
+ *@h: ae handle
+ * Return 0 on success, negative on failure
+ */
+int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct phy_device *phy_dev = NULL;
+
+	if (!h->phy_node)
+		return 0;
+
+	if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+		phy_dev = of_phy_connect(ndev, h->phy_node,
+					 hns_nic_adjust_link, 0, h->phy_if);
+	else
+		phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);
+
+	if (unlikely(!phy_dev) || IS_ERR(phy_dev))
+		return !phy_dev ? -ENODEV : PTR_ERR(phy_dev);
+
+	phy_dev->supported &= h->if_support;
+	phy_dev->advertising = phy_dev->supported;
+
+	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
+		phy_dev->autoneg = false;
+
+	priv->phy = phy_dev;
+
+	return 0;
+}
+
+static int hns_nic_ring_open(struct net_device *netdev, int idx)
+{
+	struct hns_nic_priv *priv = netdev_priv(netdev);
+	struct hnae_handle *h = priv->ae_handle;
+
+	napi_enable(&priv->ring_data[idx].napi);
+
+	enable_irq(priv->ring_data[idx].ring->irq);
+	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
+
+	return 0;
+}
+
+static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+	struct sockaddr *mac_addr = p;
+	int ret;
+
+	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
+	if (ret) {
+		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
+		return ret;
+	}
+
+	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);
+
+	return 0;
+}
+
+void hns_nic_update_stats(struct net_device *netdev)
+{
+	struct hns_nic_priv *priv = netdev_priv(netdev);
+	struct hnae_handle *h = priv->ae_handle;
+
+	h->dev->ops->update_stats(h, &netdev->stats);
+}
+
+/* set the mac address if it is configured, or leave it to the AE driver */
+static void hns_init_mac_addr(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct device_node *node = priv->dev->of_node;
+	const void *mac_addr_temp;
+
+	mac_addr_temp = of_get_mac_address(node);
+	if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
+		memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
+	} else {
+		eth_hw_addr_random(ndev);
+		dev_warn(priv->dev, "No valid mac, use random mac %pM",
+			 ndev->dev_addr);
+	}
+}
+
+static void hns_nic_ring_close(struct net_device *netdev, int idx)
+{
+	struct hns_nic_priv *priv = netdev_priv(netdev);
+	struct hnae_handle *h = priv->ae_handle;
+
+	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
+	disable_irq(priv->ring_data[idx].ring->irq);
+
+	napi_disable(&priv->ring_data[idx].napi);
+}
+
+static int hns_nic_init_irq(struct hns_nic_priv *priv)
+{
+	struct hnae_handle *h = priv->ae_handle;
+	struct hns_nic_ring_data *rd;
+	int i;
+	int ret;
+	int cpu;
+	cpumask_t mask;
+
+	for (i = 0; i < h->q_num * 2; i++) {
+		rd = &priv->ring_data[i];
+
+		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
+			break;
+
+		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
+			 "%s-%s%d", priv->netdev->name,
+			 (i < h->q_num ? "tx" : "rx"), rd->queue_index);
+
+		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
+
+		ret = request_irq(rd->ring->irq,
+				  hns_irq_handle, 0, rd->ring->ring_name, rd);
+		if (ret) {
+			netdev_err(priv->netdev, "request irq(%d) fail\n",
+				   rd->ring->irq);
+			return ret;
+		}
+		disable_irq(rd->ring->irq);
+		rd->ring->irq_init_flag = RCB_IRQ_INITED;
+
+		/*set cpu affinity*/
+		if (cpu_online(rd->queue_index)) {
+			cpumask_clear(&mask);
+			cpu = rd->queue_index;
+			cpumask_set_cpu(cpu, &mask);
+			irq_set_affinity_hint(rd->ring->irq, &mask);
+		}
+	}
+
+	return 0;
+}
+
+static int hns_nic_net_up(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+	int i, j, k;
+	int ret;
+
+	ret = hns_nic_init_irq(priv);
+	if (ret != 0) {
+		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < h->q_num * 2; i++) {
+		ret = hns_nic_ring_open(ndev, i);
+		if (ret)
+			goto out_has_some_queues;
+	}
+
+	for (k = 0; k < h->q_num; k++)
+		h->dev->ops->toggle_queue_status(h->qs[k], 1);
+
+	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
+	if (ret)
+		goto out_set_mac_addr_err;
+
+	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
+	if (ret)
+		goto out_start_err;
+
+	if (priv->phy)
+		phy_start(priv->phy);
+
+	clear_bit(NIC_STATE_DOWN, &priv->state);
+	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
+
+	return 0;
+
+out_start_err:
+	netif_stop_queue(ndev);
+out_set_mac_addr_err:
+	for (k = 0; k < h->q_num; k++)
+		h->dev->ops->toggle_queue_status(h->qs[k], 0);
+out_has_some_queues:
+	for (j = i - 1; j >= 0; j--)
+		hns_nic_ring_close(ndev, j);
+
+	set_bit(NIC_STATE_DOWN, &priv->state);
+
+	return ret;
+}
+
+static void hns_nic_net_down(struct net_device *ndev)
+{
+	int i;
+	struct hnae_ae_ops *ops;
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+
+	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
+		return;
+
+	(void)del_timer_sync(&priv->service_timer);
+	netif_tx_stop_all_queues(ndev);
+	netif_carrier_off(ndev);
+	netif_tx_disable(ndev);
+	priv->link = 0;
+
+	if (priv->phy)
+		phy_stop(priv->phy);
+
+	ops = priv->ae_handle->dev->ops;
+
+	if (ops->stop)
+		ops->stop(priv->ae_handle);
+
+	netif_tx_stop_all_queues(ndev);
+
+	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
+		hns_nic_ring_close(ndev, i);
+		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);
+
+		/* clean tx buffers*/
+		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
+	}
+}
+
+void hns_nic_net_reset(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *handle = priv->ae_handle;
+
+	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
+		usleep_range(1000, 2000);
+
+	(void)hnae_reinit_handle(handle);
+
+	clear_bit(NIC_STATE_RESETTING, &priv->state);
+}
+
+void hns_nic_net_reinit(struct net_device *netdev)
+{
+	struct hns_nic_priv *priv = netdev_priv(netdev);
+
+	priv->netdev->trans_start = jiffies;
+	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
+		usleep_range(1000, 2000);
+
+	hns_nic_net_down(netdev);
+	hns_nic_net_reset(netdev);
+	(void)hns_nic_net_up(netdev);
+	clear_bit(NIC_STATE_REINITING, &priv->state);
+}
+
+static int hns_nic_net_open(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+	int ret;
+
+	if (test_bit(NIC_STATE_TESTING, &priv->state))
+		return -EBUSY;
+
+	priv->link = 0;
+	netif_carrier_off(ndev);
+
+	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
+	if (ret < 0) {
+		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
+			   ret);
+		return ret;
+	}
+
+	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
+	if (ret < 0) {
+		netdev_err(ndev,
+			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
+		return ret;
+	}
+
+	ret = hns_nic_net_up(ndev);
+	if (ret) {
+		netdev_err(ndev,
+			   "hns net up fail, ret=%d!\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int hns_nic_net_stop(struct net_device *ndev)
+{
+	hns_nic_net_down(ndev);
+
+	return 0;
+}
+
+static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
+static void hns_nic_net_timeout(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+
+	hns_tx_timeout_reset(priv);
+}
+
+static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
+			    int cmd)
+{
+	struct hns_nic_priv *priv = netdev_priv(netdev);
+	struct phy_device *phy_dev = priv->phy;
+
+	if (!netif_running(netdev))
+		return -EINVAL;
+
+	if (!phy_dev)
+		return -ENOTSUPP;
+
+	return phy_mii_ioctl(phy_dev, ifr, cmd);
+}
+
+/* used only by netconsole to poll the device without interrupts */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void hns_nic_poll_controller(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	unsigned long flags;
+	int i;
+
+	local_irq_save(flags);
+	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
+		napi_schedule(&priv->ring_data[i].napi);
+	local_irq_restore(flags);
+}
+#endif
+
+static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
+				    struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	int ret;
+
+	assert(skb->queue_mapping < priv->ae_handle->q_num);
+	ret = hns_nic_net_xmit_hw(ndev, skb,
+				  &tx_ring_data(priv, skb->queue_mapping));
+	if (ret == NETDEV_TX_OK) {
+		ndev->trans_start = jiffies;
+		ndev->stats.tx_bytes += skb->len;
+		ndev->stats.tx_packets++;
+	}
+	return (netdev_tx_t)ret;
+}
+
+static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+	int ret;
+
+	/* MTU < 68 is an error and causes problems on some kernels */
+	if (new_mtu < 68)
+		return -EINVAL;
+
+	if (!h->dev->ops->set_mtu)
+		return -ENOTSUPP;
+
+	if (netif_running(ndev)) {
+		(void)hns_nic_net_stop(ndev);
+		msleep(100);
+
+		ret = h->dev->ops->set_mtu(h, new_mtu);
+		if (ret)
+			netdev_err(ndev, "set mtu fail, return value %d\n",
+				   ret);
+
+		if (hns_nic_net_open(ndev))
+			netdev_err(ndev, "hns net open fail\n");
+	} else {
+		ret = h->dev->ops->set_mtu(h, new_mtu);
+	}
+
+	if (!ret)
+		ndev->mtu = new_mtu;
+
+	return ret;
+}
+
+/**
+ * hns_set_multicast_list - set the multicast mac addresses
+ * @ndev: net device
+ *
+ * return void
+ */
+void hns_set_multicast_list(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+	struct netdev_hw_addr *ha = NULL;
+
+	if (!h)	{
+		netdev_err(ndev, "hnae handle is null\n");
+		return;
+	}
+
+	if (h->dev->ops->set_mc_addr) {
+		netdev_for_each_mc_addr(ha, ndev)
+			if (h->dev->ops->set_mc_addr(h, ha->addr))
+				netdev_err(ndev, "set multicast fail\n");
+	}
+}
+
+struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
+					      struct rtnl_link_stats64 *stats)
+{
+	int idx = 0;
+	u64 tx_bytes = 0;
+	u64 rx_bytes = 0;
+	u64 tx_pkts = 0;
+	u64 rx_pkts = 0;
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+
+	for (idx = 0; idx < h->q_num; idx++) {
+		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
+		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
+		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
+		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
+	}
+
+	stats->tx_bytes = tx_bytes;
+	stats->tx_packets = tx_pkts;
+	stats->rx_bytes = rx_bytes;
+	stats->rx_packets = rx_pkts;
+
+	stats->rx_errors = ndev->stats.rx_errors;
+	stats->multicast = ndev->stats.multicast;
+	stats->rx_length_errors = ndev->stats.rx_length_errors;
+	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
+	stats->rx_missed_errors = ndev->stats.rx_missed_errors;
+
+	stats->tx_errors = ndev->stats.tx_errors;
+	stats->rx_dropped = ndev->stats.rx_dropped;
+	stats->tx_dropped = ndev->stats.tx_dropped;
+	stats->collisions = ndev->stats.collisions;
+	stats->rx_over_errors = ndev->stats.rx_over_errors;
+	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
+	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
+	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
+	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
+	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
+	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
+	stats->tx_window_errors = ndev->stats.tx_window_errors;
+	stats->rx_compressed = ndev->stats.rx_compressed;
+	stats->tx_compressed = ndev->stats.tx_compressed;
+
+	return stats;
+}
+
+static const struct net_device_ops hns_nic_netdev_ops = {
+	.ndo_open = hns_nic_net_open,
+	.ndo_stop = hns_nic_net_stop,
+	.ndo_start_xmit = hns_nic_net_xmit,
+	.ndo_tx_timeout = hns_nic_net_timeout,
+	.ndo_set_mac_address = hns_nic_net_set_mac_address,
+	.ndo_change_mtu = hns_nic_change_mtu,
+	.ndo_do_ioctl = hns_nic_do_ioctl,
+	.ndo_get_stats64 = hns_nic_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = hns_nic_poll_controller,
+#endif
+	.ndo_set_rx_mode = hns_set_multicast_list,
+};
+
+static void hns_nic_update_link_status(struct net_device *netdev)
+{
+	struct hns_nic_priv *priv = netdev_priv(netdev);
+
+	struct hnae_handle *h = priv->ae_handle;
+	int state = 1;
+
+	if (priv->phy) {
+		if (!genphy_update_link(priv->phy))
+			state = priv->phy->link;
+		else
+			state = 0;
+	}
+	state = state && h->dev->ops->get_status(h);
+
+	if (state != priv->link) {
+		if (state) {
+			netif_carrier_on(netdev);
+			netif_tx_wake_all_queues(netdev);
+			netdev_info(netdev, "link up\n");
+		} else {
+			netif_carrier_off(netdev);
+			netdev_info(netdev, "link down\n");
+		}
+		priv->link = state;
+	}
+}
+
+/* for dumping key registers */
+static void hns_nic_dump(struct hns_nic_priv *priv)
+{
+	struct hnae_handle *h = priv->ae_handle;
+	struct hnae_ae_ops *ops = h->dev->ops;
+	u32 *data, reg_num, i;
+
+	if (ops->get_regs_len && ops->get_regs) {
+		reg_num = ops->get_regs_len(priv->ae_handle);
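+		/* round reg_num up to a multiple of 4 so that the
+		 * 4-words-per-line dump below never reads past the buffer
+		 */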
+		reg_num = (reg_num + 3ul) & ~3ul;
+		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
+		if (data) {
+			ops->get_regs(priv->ae_handle, data);
+			for (i = 0; i < reg_num; i += 4)
+				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+					i, data[i], data[i + 1],
+					data[i + 2], data[i + 3]);
+			kfree(data);
+		}
+	}
+
+	for (i = 0; i < h->q_num; i++) {
+		pr_info("tx_queue%d_next_to_clean:%d\n",
+			i, h->qs[i]->tx_ring.next_to_clean);
+		pr_info("tx_queue%d_next_to_use:%d\n",
+			i, h->qs[i]->tx_ring.next_to_use);
+		pr_info("rx_queue%d_next_to_clean:%d\n",
+			i, h->qs[i]->rx_ring.next_to_clean);
+		pr_info("rx_queue%d_next_to_use:%d\n",
+			i, h->qs[i]->rx_ring.next_to_use);
+	}
+}
+
+/* for the reset subtask */
+static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
+{
+	enum hnae_port_type type = priv->ae_handle->port_type;
+
+	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
+		return;
+	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
+
+	/* If we're already down, removing or resetting, just bail */
+	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
+	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
+	    test_bit(NIC_STATE_RESETTING, &priv->state))
+		return;
+
+	hns_nic_dump(priv);
+	netdev_err(priv->netdev, "Reset %s port\n",
+		   (type == HNAE_PORT_DEBUG ? "debug" : "business"));
+
+	rtnl_lock();
+	if (type == HNAE_PORT_DEBUG) {
+		hns_nic_net_reinit(priv->netdev);
+	} else {
+		hns_nic_net_down(priv->netdev);
+		hns_nic_net_reset(priv->netdev);
+	}
+	rtnl_unlock();
+}
+
+/* complete the service task */
+static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
+{
+	assert(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
+
+	smp_mb__before_atomic();
+	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
+}
+
+static void hns_nic_service_task(struct work_struct *work)
+{
+	struct hns_nic_priv *priv
+		= container_of(work, struct hns_nic_priv, service_task);
+	struct hnae_handle *h = priv->ae_handle;
+
+	hns_nic_update_link_status(priv->netdev);
+	h->dev->ops->update_led_status(h);
+	hns_nic_update_stats(priv->netdev);
+
+	hns_nic_reset_subtask(priv);
+	hns_nic_service_event_complete(priv);
+}
+
+static void hns_nic_task_schedule(struct hns_nic_priv *priv)
+{
+	if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
+	    !test_bit(NIC_STATE_REMOVING, &priv->state) &&
+	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
+		(void)schedule_work(&priv->service_task);
+}
+
+static void hns_nic_service_timer(unsigned long data)
+{
+	struct hns_nic_priv *priv = (struct hns_nic_priv *)data;
+
+	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
+
+	hns_nic_task_schedule(priv);
+}
+
+/**
+ * hns_tx_timeout_reset - initiate reset due to Tx timeout
+ * @priv: driver private struct
+ **/
+static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
+{
+	/* Do the reset outside of interrupt context */
+	if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
+		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
+		netdev_warn(priv->netdev,
+			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
+			    priv->tx_timeout_count, priv->state);
+		priv->tx_timeout_count++;
+		hns_nic_task_schedule(priv);
+	}
+}
+
+static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
+{
+	struct hnae_handle *h = priv->ae_handle;
+	struct hns_nic_ring_data *rd;
+	int i;
+
+	if (h->q_num > NIC_MAX_Q_PER_VF) {
+		netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
+		return -EINVAL;
+	}
+
+	priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
+				  GFP_KERNEL);
+	if (!priv->ring_data)
+		return -ENOMEM;
+
+	for (i = 0; i < h->q_num; i++) {
+		rd = &priv->ring_data[i];
+		rd->queue_index = i;
+		rd->ring = &h->qs[i]->tx_ring;
+		rd->poll_one = hns_nic_tx_poll_one;
+		rd->fini_process = hns_nic_tx_fini_pro;
+
+		netif_napi_add(priv->netdev, &rd->napi,
+			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+	}
+	for (i = h->q_num; i < h->q_num * 2; i++) {
+		rd = &priv->ring_data[i];
+		rd->queue_index = i - h->q_num;
+		rd->ring = &h->qs[i - h->q_num]->rx_ring;
+		rd->poll_one = hns_nic_rx_poll_one;
+		rd->ex_process = hns_nic_rx_up_pro;
+		rd->fini_process = hns_nic_rx_fini_pro;
+
+		netif_napi_add(priv->netdev, &rd->napi,
+			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+	}
+
+	return 0;
+}
+
+static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
+{
+	struct hnae_handle *h = priv->ae_handle;
+	int i;
+
+	for (i = 0; i < h->q_num * 2; i++) {
+		netif_napi_del(&priv->ring_data[i].napi);
+		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+			irq_set_affinity_hint(priv->ring_data[i].ring->irq,
+					      NULL);
+			free_irq(priv->ring_data[i].ring->irq,
+				 &priv->ring_data[i]);
+		}
+
+		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+	}
+	kfree(priv->ring_data);
+}
+
+static int hns_nic_try_get_ae(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h;
+	int ret;
+
+	h = hnae_get_handle(&priv->netdev->dev,
+			    priv->ae_name, priv->port_id, NULL);
+	if (IS_ERR_OR_NULL(h)) {
+		ret = PTR_ERR(h);
+		dev_dbg(priv->dev, "no hnae handle yet, register notifier!\n");
+		goto out;
+	}
+	priv->ae_handle = h;
+
+	ret = hns_nic_init_phy(ndev, h);
+	if (ret) {
+		dev_err(priv->dev, "probe phy device fail!\n");
+		goto out_init_phy;
+	}
+
+	ret = hns_nic_init_ring_data(priv);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_init_ring_data;
+	}
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		dev_err(priv->dev, "probe register netdev fail!\n");
+		goto out_reg_ndev_fail;
+	}
+	return 0;
+
+out_reg_ndev_fail:
+	hns_nic_uninit_ring_data(priv);
+	priv->ring_data = NULL;
+out_init_phy:
+out_init_ring_data:
+	hnae_put_handle(priv->ae_handle);
+	priv->ae_handle = NULL;
+out:
+	return ret;
+}
+
+static int hns_nic_notifier_action(struct notifier_block *nb,
+				   unsigned long action, void *data)
+{
+	struct hns_nic_priv *priv =
+		container_of(nb, struct hns_nic_priv, notifier_block);
+
+	assert(action == HNAE_AE_REGISTER);
+
+	if (!hns_nic_try_get_ae(priv->netdev)) {
+		hnae_unregister_notifier(&priv->notifier_block);
+		priv->notifier_block.notifier_call = NULL;
+	}
+	return 0;
+}
+
+static int hns_nic_dev_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct net_device *ndev;
+	struct hns_nic_priv *priv;
+	struct device_node *node = dev->of_node;
+	int ret;
+
+	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
+	if (!ndev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, ndev);
+
+	priv = netdev_priv(ndev);
+	priv->dev = dev;
+	priv->netdev = ndev;
+
+	if (of_device_is_compatible(node, "hisilicon,hns-nic-v2"))
+		priv->enet_ver = AE_VERSION_2;
+	else
+		priv->enet_ver = AE_VERSION_1;
+
+	ret = of_property_read_string(node, "ae-name", &priv->ae_name);
+	if (ret)
+		goto out_read_string_fail;
+
+	ret = of_property_read_u32(node, "port-id", &priv->port_id);
+	if (ret)
+		goto out_read_string_fail;
+
+	hns_init_mac_addr(ndev);
+
+	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+	ndev->priv_flags |= IFF_UNICAST_FLT;
+	ndev->netdev_ops = &hns_nic_netdev_ops;
+	hns_ethtool_set_ops(ndev);
+	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+		NETIF_F_GRO;
+	ndev->vlan_features |=
+		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+
+	SET_NETDEV_DEV(ndev, dev);
+
+	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+		dev_dbg(dev, "set mask to 64bit\n");
+	else
+		dev_err(dev, "set dma mask to 64bit fail!\n");
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(ndev);
+
+	setup_timer(&priv->service_timer, hns_nic_service_timer,
+		    (unsigned long)priv);
+	INIT_WORK(&priv->service_task, hns_nic_service_task);
+
+	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
+	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
+	set_bit(NIC_STATE_DOWN, &priv->state);
+
+	if (hns_nic_try_get_ae(priv->netdev)) {
+		priv->notifier_block.notifier_call = hns_nic_notifier_action;
+		ret = hnae_register_notifier(&priv->notifier_block);
+		if (ret) {
+			dev_err(dev, "register notifier fail!\n");
+			goto out_notify_fail;
+		}
+		dev_dbg(dev, "no hnae handle yet, notifier registered\n");
+	}
+
+	return 0;
+
+out_notify_fail:
+	(void)cancel_work_sync(&priv->service_task);
+out_read_string_fail:
+	free_netdev(ndev);
+	return ret;
+}
+
+static int hns_nic_dev_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+
+	if (ndev->reg_state != NETREG_UNINITIALIZED)
+		unregister_netdev(ndev);
+
+	if (priv->ring_data)
+		hns_nic_uninit_ring_data(priv);
+	priv->ring_data = NULL;
+
+	if (priv->phy)
+		phy_disconnect(priv->phy);
+	priv->phy = NULL;
+
+	if (!IS_ERR_OR_NULL(priv->ae_handle))
+		hnae_put_handle(priv->ae_handle);
+	priv->ae_handle = NULL;
+	if (priv->notifier_block.notifier_call)
+		hnae_unregister_notifier(&priv->notifier_block);
+	priv->notifier_block.notifier_call = NULL;
+
+	set_bit(NIC_STATE_REMOVING, &priv->state);
+	(void)cancel_work_sync(&priv->service_task);
+
+	free_netdev(ndev);
+	return 0;
+}
+
+static const struct of_device_id hns_enet_of_match[] = {
+	{.compatible = "hisilicon,hns-nic-v1",},
+	{.compatible = "hisilicon,hns-nic-v2",},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, hns_enet_of_match);
+
+static struct platform_driver hns_nic_dev_driver = {
+	.driver = {
+		.name = "hns-nic",
+		.owner = THIS_MODULE,
+		.of_match_table = hns_enet_of_match,
+	},
+	.probe = hns_nic_dev_probe,
+	.remove = hns_nic_dev_remove,
+};
+
+module_platform_driver(hns_nic_dev_driver);
+
+MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
+MODULE_AUTHOR("Hisilicon, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:hns-nic");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
new file mode 100644
index 0000000..dae0ed1
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HNS_ENET_H
+#define __HNS_ENET_H
+
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#include "hnae.h"
+
+enum hns_nic_state {
+	NIC_STATE_TESTING = 0,
+	NIC_STATE_RESETTING,
+	NIC_STATE_REINITING,
+	NIC_STATE_DOWN,
+	NIC_STATE_DISABLED,
+	NIC_STATE_REMOVING,
+	NIC_STATE_SERVICE_INITED,
+	NIC_STATE_SERVICE_SCHED,
+	NIC_STATE2_RESET_REQUESTED,
+	NIC_STATE_MAX
+};
+
+struct hns_nic_ring_data {
+	struct hnae_ring *ring;
+	struct napi_struct napi;
+	int queue_index;
+	int (*poll_one)(struct hns_nic_ring_data *, int, void *);
+	void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *);
+	void (*fini_process)(struct hns_nic_ring_data *);
+};
+
+struct hns_nic_priv {
+	const char *ae_name;
+	u32 enet_ver;
+	u32 port_id;
+	int phy_mode;
+	int phy_led_val;
+	struct phy_device *phy;
+	struct net_device *netdev;
+	struct device *dev;
+	struct hnae_handle *ae_handle;
+
+	/* the callback ring data used by the nic to manage the ring buffers;
+	 * the first half of the array is for tx rings and the second half
+	 * for rx rings
+	 */
+	struct hns_nic_ring_data *ring_data;
+
+	/* The most recently read link state */
+	int link;
+	u64 tx_timeout_count;
+
+	unsigned long state;
+
+	struct timer_list service_timer;
+
+	struct work_struct service_task;
+
+	struct notifier_block notifier_block;
+};
+
+#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
+#define rx_ring_data(priv, idx) \
+	((priv)->ring_data[(priv)->ae_handle->q_num + (idx)])
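+
+/* ring_data[] holds q_num tx entries followed by q_num rx entries; e.g.
+ * (hypothetically) with q_num == 16, tx_ring_data(priv, 3) resolves to
+ * ring_data[3] and rx_ring_data(priv, 3) to ring_data[19].
+ */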
+
+void hns_ethtool_set_ops(struct net_device *ndev);
+void hns_nic_net_reset(struct net_device *ndev);
+void hns_nic_net_reinit(struct net_device *netdev);
+int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h);
+int hns_nic_net_xmit_hw(struct net_device *ndev,
+			struct sk_buff *skb,
+			struct hns_nic_ring_data *ring_data);
+
+#endif	/* __HNS_ENET_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
new file mode 100644
index 0000000..2550208
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -0,0 +1,1230 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "hns_enet.h"
+
+#define HNS_PHY_PAGE_MDIX	0
+#define HNS_PHY_PAGE_LED	3
+#define HNS_PHY_PAGE_COPPER	0
+
+#define HNS_PHY_PAGE_REG	22	/* Page Selection Reg. */
+#define HNS_PHY_CSC_REG		16	/* Copper Specific Control Register */
+#define HNS_PHY_CSS_REG		17	/* Copper Specific Status Register */
+#define HNS_LED_FC_REG		16	/* LED Function Control Reg. */
+#define HNS_LED_PC_REG		17	/* LED Polarity Control Reg. */
+
+#define HNS_LED_FORCE_ON	9
+#define HNS_LED_FORCE_OFF	8
+
+#define HNS_CHIP_VERSION 660
+#define HNS_NET_STATS_CNT 26
+
+#define PHY_MDIX_CTRL_S		(5)
+#define PHY_MDIX_CTRL_M		(3 << PHY_MDIX_CTRL_S)
+
+#define PHY_MDIX_STATUS_B	(6)
+#define PHY_SPEED_DUP_RESOLVE_B	(11)
+
+/**
+ * hns_nic_get_link - get current link status
+ * @net_dev: net_device
+ *
+ * Return: the current link status (nonzero when the link is up)
+ */
+static u32 hns_nic_get_link(struct net_device *net_dev)
+{
+	struct hns_nic_priv *priv = netdev_priv(net_dev);
+	u32 link_stat = priv->link;
+	struct hnae_handle *h;
+
+	assert(priv && priv->ae_handle);
+	h = priv->ae_handle;
+
+	if (priv->phy) {
+		if (!genphy_update_link(priv->phy))
+			link_stat = priv->phy->link;
+		else
+			link_stat = 0;
+	}
+
+	if (h->dev && h->dev->ops && h->dev->ops->get_status)
+		link_stat = link_stat && h->dev->ops->get_status(h);
+	else
+		link_stat = 0;
+
+	return link_stat;
+}
+
+static void hns_get_mdix_mode(struct net_device *net_dev,
+			      struct ethtool_cmd *cmd)
+{
+	int mdix_ctrl, mdix, retval, is_resolved;
+	struct hns_nic_priv *priv = netdev_priv(net_dev);
+	struct phy_device *phy_dev = priv->phy;
+
+	if (!phy_dev || !phy_dev->bus) {
+		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+		return;
+	}
+
+	(void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG,
+			    HNS_PHY_PAGE_MDIX);
+
+	retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSC_REG);
+	mdix_ctrl = hnae_get_field(retval, PHY_MDIX_CTRL_M, PHY_MDIX_CTRL_S);
+
+	retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSS_REG);
+	mdix = hnae_get_bit(retval, PHY_MDIX_STATUS_B);
+	is_resolved = hnae_get_bit(retval, PHY_SPEED_DUP_RESOLVE_B);
+
+	(void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG,
+			    HNS_PHY_PAGE_COPPER);
+
+	switch (mdix_ctrl) {
+	case 0x0:
+		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+		break;
+	case 0x1:
+		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+		break;
+	case 0x3:
+		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+		break;
+	default:
+		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+		break;
+	}
+
+	if (!is_resolved)
+		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+	else if (mdix)
+		cmd->eth_tp_mdix = ETH_TP_MDI_X;
+	else
+		cmd->eth_tp_mdix = ETH_TP_MDI;
+}
+
+/**
+ * hns_nic_get_settings - implement ethtool get settings
+ * @net_dev: net_device
+ * @cmd: ethtool_cmd
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_nic_get_settings(struct net_device *net_dev,
+				struct ethtool_cmd *cmd)
+{
+	struct hns_nic_priv *priv = netdev_priv(net_dev);
+	struct hnae_handle *h;
+	u32 link_stat;
+	int ret;
+	u8 duplex;
+	u16 speed;
+
+	if (!priv || !priv->ae_handle)
+		return -ESRCH;
+
+	h = priv->ae_handle;
+	if (!h->dev || !h->dev->ops || !h->dev->ops->get_info)
+		return -ESRCH;
+
+	ret = h->dev->ops->get_info(h, NULL, &speed, &duplex);
+	if (ret < 0) {
+		netdev_err(net_dev, "%s get_info error!\n", __func__);
+		return -EINVAL;
+	}
+
+	/* When there is no phy, autoneg is off. */
+	cmd->autoneg = false;
+	ethtool_cmd_speed_set(cmd, speed);
+	cmd->duplex = duplex;
+
+	if (priv->phy)
+		(void)phy_ethtool_gset(priv->phy, cmd);
+
+	link_stat = hns_nic_get_link(net_dev);
+	if (!link_stat) {
+		ethtool_cmd_speed_set(cmd, (u32)SPEED_UNKNOWN);
+		cmd->duplex = DUPLEX_UNKNOWN;
+	}
+
+	if (cmd->autoneg)
+		cmd->advertising |= ADVERTISED_Autoneg;
+
+	cmd->supported |= h->if_support;
+	if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
+		cmd->supported |= SUPPORTED_TP;
+		cmd->advertising |= ADVERTISED_1000baseT_Full;
+	} else if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
+		cmd->supported |= SUPPORTED_FIBRE;
+		cmd->advertising |= ADVERTISED_10000baseKR_Full;
+	}
+
+	if (h->port_type == HNAE_PORT_SERVICE) {
+		cmd->port = PORT_FIBRE;
+		cmd->supported |= SUPPORTED_Pause;
+	} else {
+		cmd->port = PORT_TP;
+	}
+
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22);
+	hns_get_mdix_mode(net_dev, cmd);
+
+	return 0;
+}
+
+/**
+ * hns_nic_set_settings - implement ethtool set settings
+ * @net_dev: net_device
+ * @cmd: ethtool_cmd
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_nic_set_settings(struct net_device *net_dev,
+				struct ethtool_cmd *cmd)
+{
+	struct hns_nic_priv *priv = netdev_priv(net_dev);
+	struct hnae_handle *h;
+	int link_stat;
+	u32 speed;
+	u8 duplex, autoneg;
+
+	if (!netif_running(net_dev))
+		return -ESRCH;
+
+	if (!priv || !priv->ae_handle || !priv->ae_handle->dev ||
+	    !priv->ae_handle->dev->ops)
+		return -ENODEV;
+
+	h = priv->ae_handle;
+	link_stat = hns_nic_get_link(net_dev);
+	duplex = cmd->duplex;
+	speed = ethtool_cmd_speed(cmd);
+	autoneg = cmd->autoneg;
+
+	if (!link_stat) {
+		if (duplex != (u8)DUPLEX_UNKNOWN || speed != (u32)SPEED_UNKNOWN)
+			return -EINVAL;
+
+		if (h->phy_if == PHY_INTERFACE_MODE_SGMII && h->phy_node) {
+			priv->phy->autoneg = autoneg;
+			return phy_start_aneg(priv->phy);
+		}
+	}
+
+	if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
+		if (autoneg != AUTONEG_DISABLE)
+			return -EINVAL;
+
+		if (speed != SPEED_10000 || duplex != DUPLEX_FULL)
+			return -EINVAL;
+	} else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
+		if (!h->phy_node && autoneg != AUTONEG_DISABLE)
+			return -EINVAL;
+
+		if (speed == SPEED_1000 && duplex == DUPLEX_HALF)
+			return -EINVAL;
+
+		if (speed != SPEED_10 && speed != SPEED_100 &&
+		    speed != SPEED_1000)
+			return -EINVAL;
+	} else {
+		netdev_err(net_dev, "Not supported!\n");
+		return -ENOTSUPP;
+	}
+
+	if (priv->phy) {
+		return phy_ethtool_sset(priv->phy, cmd);
+	} else if (h->dev->ops->adjust_link && link_stat) {
+		h->dev->ops->adjust_link(h, speed, duplex);
+		return 0;
+	}
+	netdev_err(net_dev, "Not supported!\n");
+	return -ENOTSUPP;
+}
+
+static const char hns_nic_test_strs[][ETH_GSTRING_LEN] = {
+	"Mac    Loopback test",
+	"Serdes Loopback test",
+	"Phy    Loopback test"
+};
+
+static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
+{
+#define COPPER_CONTROL_REG 0
+#define PHY_LOOP_BACK BIT(14)
+	u16 val = 0;
+
+	if (phy_dev->is_c45) /* c45 loopback for the XGE PHY is not supported yet */
+		return -ENOTSUPP;
+
+	if (en) {
+		/* speed : 1000M */
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    HNS_PHY_PAGE_REG, 2);
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    21, 0x1046);
+		/* Force Master */
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    9, 0x1F00);
+		/* Soft-reset */
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    0, 0x9140);
+		/* if autoneg is disabled, two soft-reset operations are needed */
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    0, 0x9140);
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    22, 0xFA);
+
+		/* Default is 0x0400 */
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    1, 0x418);
+
+		/* Force 1000M Link, Default is 0x0200 */
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    7, 0x20C);
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    22, 0);
+
+		/* Enable PHY loop-back */
+		val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr,
+					COPPER_CONTROL_REG);
+		val |= PHY_LOOP_BACK;
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    COPPER_CONTROL_REG, val);
+	} else {
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    22, 0xFA);
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    1, 0x400);
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    7, 0x200);
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    22, 0);
+
+		val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr,
+					COPPER_CONTROL_REG);
+		val &= ~PHY_LOOP_BACK;
+		(void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+				    COPPER_CONTROL_REG, val);
+	}
+	return 0;
+}
+
+static int __lb_setup(struct net_device *ndev,
+		      enum hnae_loop loop)
+{
+	int ret = 0;
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct phy_device *phy_dev = priv->phy;
+	struct hnae_handle *h = priv->ae_handle;
+
+	switch (loop) {
+	case MAC_INTERNALLOOP_PHY:
+		if ((phy_dev) && (!phy_dev->is_c45))
+			ret = hns_nic_config_phy_loopback(phy_dev, 0x1);
+		break;
+	case MAC_INTERNALLOOP_MAC:
+		if ((h->dev->ops->set_loopback) &&
+		    (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII))
+			ret = h->dev->ops->set_loopback(h, loop, 0x1);
+		break;
+	case MAC_INTERNALLOOP_SERDES:
+		if (h->dev->ops->set_loopback)
+			ret = h->dev->ops->set_loopback(h, loop, 0x1);
+		break;
+	case MAC_LOOP_NONE:
+		if ((phy_dev) && (!phy_dev->is_c45))
+			ret |= hns_nic_config_phy_loopback(phy_dev, 0x0);
+
+		if (h->dev->ops->set_loopback) {
+			if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+				ret |= h->dev->ops->set_loopback(h,
+					MAC_INTERNALLOOP_MAC, 0x0);
+
+			ret |= h->dev->ops->set_loopback(h,
+				MAC_INTERNALLOOP_SERDES, 0x0);
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int __lb_up(struct net_device *ndev,
+		   enum hnae_loop loop_mode)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+	int speed, duplex;
+	int ret;
+
+	hns_nic_net_reset(ndev);
+
+	if (priv->phy) {
+		phy_disconnect(priv->phy);
+		msleep(100);
+
+		ret = hns_nic_init_phy(ndev, h);
+		if (ret)
+			return ret;
+	}
+
+	ret = __lb_setup(ndev, loop_mode);
+	if (ret)
+		return ret;
+
+	msleep(100);
+
+	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
+	if (ret)
+		return ret;
+
+	if (priv->phy)
+		phy_start(priv->phy);
+
+	/* adjust link speed and duplex for the loopback test */
+	if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+		speed = 1000;
+	else
+		speed = 10000;
+	duplex = 1;
+
+	h->dev->ops->adjust_link(h, speed, duplex);
+
+	return 0;
+}
+
+static void __lb_other_process(struct hns_nic_ring_data *ring_data,
+			       struct sk_buff *skb)
+{
+	struct net_device *ndev;
+	struct hnae_ring *ring;
+	struct netdev_queue *dev_queue;
+	struct sk_buff *new_skb;
+	unsigned int frame_size;
+	int check_ok;
+	u32 i;
+	char buff[33]; /* 32B data and the last character '\0' */
+
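+	/* When called without ring_data this function only builds the test
+	 * frame: the first half is 0xFF and the second half carries an
+	 * 0xAA/0xBE/0xAF marker pattern that the rx path below checks at
+	 * fixed offsets to validate the looped-back frame.
+	 */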
+	if (!ring_data) { /* just build the test frame */
+		frame_size = skb->len;
+		memset(skb->data, 0xFF, frame_size);
+		frame_size &= ~1ul;
+		memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+		memset(&skb->data[frame_size / 2 + 10], 0xBE,
+		       frame_size / 2 - 11);
+		memset(&skb->data[frame_size / 2 + 12], 0xAF,
+		       frame_size / 2 - 13);
+		return;
+	}
+
+	ring = ring_data->ring;
+	ndev = ring_data->napi.dev;
+	if (is_tx_ring(ring)) { /* reset the tx queue accounting */
+		dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+		netdev_tx_reset_queue(dev_queue);
+		return;
+	}
+
+	frame_size = skb->len;
+	frame_size &= ~1ul;
+	/* copy to a linear skb in case the frame spans multiple buffers */
+	new_skb = skb_copy(skb, GFP_ATOMIC);
+	dev_kfree_skb_any(skb);
+	skb = new_skb;
+
+	check_ok = 0;
+	if (*(skb->data + 10) == 0xFF) { /* check the received test frame */
+		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+		    (*(skb->data + frame_size / 2 + 12) == 0xAF))
+			check_ok = 1;
+	}
+
+	if (check_ok) {
+		ndev->stats.rx_packets++;
+		ndev->stats.rx_bytes += skb->len;
+	} else {
+		ndev->stats.rx_frame_errors++;
+		for (i = 0; i < skb->len; i++) {
+			snprintf(buff + i % 16 * 2, 3, /* trailing '\0' */
+				 "%02x", *(skb->data + i));
+			if ((i % 16 == 15) || (i == skb->len - 1))
+				pr_info("%s\n", buff);
+		}
+	}
+	dev_kfree_skb_any(skb);
+}
+
+static int __lb_clean_rings(struct hns_nic_priv *priv,
+			    int ringid0, int ringid1, int budget)
+{
+	int i, ret;
+	struct hns_nic_ring_data *ring_data;
+	struct net_device *ndev = priv->netdev;
+	unsigned long rx_packets = ndev->stats.rx_packets;
+	unsigned long rx_bytes = ndev->stats.rx_bytes;
+	unsigned long rx_frame_errors = ndev->stats.rx_frame_errors;
+
+	for (i = ringid0; i <= ringid1; i++) {
+		ring_data = &priv->ring_data[i];
+		(void)ring_data->poll_one(ring_data,
+					  budget, __lb_other_process);
+	}
+	ret = (int)(ndev->stats.rx_packets - rx_packets);
+	ndev->stats.rx_packets = rx_packets;
+	ndev->stats.rx_bytes = rx_bytes;
+	ndev->stats.rx_frame_errors = rx_frame_errors;
+	return ret;
+}
+
+/**
+ * __lb_run_test - run a loopback test
+ * @ndev: net device
+ * @loop_mode: loopback mode
+ */
+static int __lb_run_test(struct net_device *ndev,
+			 enum hnae_loop loop_mode)
+{
+#define NIC_LB_TEST_PKT_NUM_PER_CYCLE 1
+#define NIC_LB_TEST_RING_ID 0
+#define NIC_LB_TEST_FRAME_SIZE 128
+/* nic loopback test error codes */
+#define NIC_LB_TEST_NO_MEM_ERR 1
+#define NIC_LB_TEST_TX_CNT_ERR 2
+#define NIC_LB_TEST_RX_CNT_ERR 3
+#define NIC_LB_TEST_RX_PKG_ERR 4
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+	int i, j, lc, good_cnt, ret_val = 0;
+	unsigned int size;
+	netdev_tx_t tx_ret_val;
+	struct sk_buff *skb;
+
+	size = NIC_LB_TEST_FRAME_SIZE;
+	/* allocate test skb */
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (!skb)
+		return NIC_LB_TEST_NO_MEM_ERR;
+
+	/* place data into test skb */
+	(void)skb_put(skb, size);
+	__lb_other_process(NULL, skb);
+	skb->queue_mapping = NIC_LB_TEST_RING_ID;
+
+	lc = 1;
+	for (j = 0; j < lc; j++) {
+		/* reset count of good packets */
+		good_cnt = 0;
+		/* place the test packets on the transmit queue */
+		for (i = 0; i < NIC_LB_TEST_PKT_NUM_PER_CYCLE; i++) {
+			(void)skb_get(skb);
+
+			tx_ret_val = (netdev_tx_t)hns_nic_net_xmit_hw(
+				ndev, skb,
+				&tx_ring_data(priv, skb->queue_mapping));
+			if (tx_ret_val == NETDEV_TX_OK)
+				good_cnt++;
+			else
+				break;
+		}
+		if (good_cnt != NIC_LB_TEST_PKT_NUM_PER_CYCLE) {
+			ret_val = NIC_LB_TEST_TX_CNT_ERR;
+			dev_err(priv->dev, "%s sent fail, cnt=0x%x, budget=0x%x\n",
+				hns_nic_test_strs[loop_mode], good_cnt,
+				NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+			break;
+		}
+
+		/* allow 100 milliseconds for packets to go from Tx to Rx */
+		msleep(100);
+
+		good_cnt = __lb_clean_rings(priv,
+					    h->q_num, h->q_num * 2 - 1,
+					    NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+		if (good_cnt != NIC_LB_TEST_PKT_NUM_PER_CYCLE) {
+			ret_val = NIC_LB_TEST_RX_CNT_ERR;
+			dev_err(priv->dev, "%s recv fail, cnt=0x%x, budget=0x%x\n",
+				hns_nic_test_strs[loop_mode], good_cnt,
+				NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+			break;
+		}
+		(void)__lb_clean_rings(priv,
+				       NIC_LB_TEST_RING_ID, NIC_LB_TEST_RING_ID,
+				       NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+	}
+
+	/* free the original skb */
+	kfree_skb(skb);
+
+	return ret_val;
+}
+
+static int __lb_down(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+	int ret;
+
+	ret = __lb_setup(ndev, MAC_LOOP_NONE);
+	if (ret)
+		netdev_err(ndev, "%s: __lb_setup return error(%d)!\n",
+			   __func__,
+			   ret);
+
+	if (priv->phy)
+		phy_stop(priv->phy);
+
+	if (h->dev->ops->stop)
+		h->dev->ops->stop(h);
+
+	usleep_range(10000, 20000);
+	(void)__lb_clean_rings(priv, 0, h->q_num - 1, 256);
+
+	hns_nic_net_reset(ndev);
+
+	return 0;
+}
+
+/**
+ * hns_nic_self_test - self test
+ * @ndev: net device
+ * @eth_test: test cmd
+ * @data: test result
+ */
+static void hns_nic_self_test(struct net_device *ndev,
+			      struct ethtool_test *eth_test, u64 *data)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	bool if_running = netif_running(ndev);
+#define SELF_TEST_TYPE_NUM 3
+	int st_param[SELF_TEST_TYPE_NUM][2];
+	int i;
+	int test_index = 0;
+
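+	/* st_param[i][0] is the loopback mode to exercise and st_param[i][1]
+	 * records whether that mode applies to this port/phy setup.
+	 */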
+	st_param[0][0] = MAC_INTERNALLOOP_MAC; /* XGE does not support mac loopback */
+	st_param[0][1] = (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII);
+	st_param[1][0] = MAC_INTERNALLOOP_SERDES;
+	st_param[1][1] = 1; /* serdes loopback is always available */
+	st_param[2][0] = MAC_INTERNALLOOP_PHY; /* only supported with a phy node */
+	st_param[2][1] = ((!!(priv->ae_handle->phy_node)) &&
+		(priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII));
+
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		set_bit(NIC_STATE_TESTING, &priv->state);
+
+		if (if_running)
+			(void)dev_close(ndev);
+
+		for (i = 0; i < SELF_TEST_TYPE_NUM; i++) {
+			if (!st_param[i][1])
+				continue;	/* NEXT testing */
+
+			data[test_index] = __lb_up(ndev,
+				(enum hnae_loop)st_param[i][0]);
+			if (!data[test_index]) {
+				data[test_index] = __lb_run_test(
+					ndev, (enum hnae_loop)st_param[i][0]);
+				(void)__lb_down(ndev);
+			}
+
+			if (data[test_index])
+				eth_test->flags |= ETH_TEST_FL_FAILED;
+
+			test_index++;
+		}
+
+		hns_nic_net_reset(priv->netdev);
+
+		clear_bit(NIC_STATE_TESTING, &priv->state);
+
+		if (if_running)
+			(void)dev_open(ndev);
+	}
+	/* Online tests aren't run; pass by default */
+
+	(void)msleep_interruptible(4 * 1000);
+}
+
+/**
+ * hns_nic_get_drvinfo - get net driver info
+ * @net_dev: net device
+ * @drvinfo: driver info
+ */
+static void hns_nic_get_drvinfo(struct net_device *net_dev,
+				struct ethtool_drvinfo *drvinfo)
+{
+	struct hns_nic_priv *priv = netdev_priv(net_dev);
+
+	assert(priv);
+
+	strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
+		sizeof(drvinfo->version));
+	drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
+
+	strncpy(drvinfo->driver, HNAE_DRIVER_NAME, sizeof(drvinfo->driver));
+	drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
+
+	strncpy(drvinfo->bus_info, priv->dev->bus->name,
+		sizeof(drvinfo->bus_info));
+	drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
+
+	strncpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN);
+	drvinfo->eedump_len = 0;
+}
+
+/**
+ * hns_get_ringparam - get ring parameter
+ * @net_dev: net device
+ * @param: ethtool parameter
+ */
+void hns_get_ringparam(struct net_device *net_dev,
+		       struct ethtool_ringparam *param)
+{
+	struct hns_nic_priv *priv = netdev_priv(net_dev);
+	struct hnae_ae_ops *ops;
+	struct hnae_queue *queue;
+	u32 uplimit = 0;
+
+	queue = priv->ae_handle->qs[0];
+	ops = priv->ae_handle->dev->ops;
+
+	if (ops->get_ring_bdnum_limit)
+		ops->get_ring_bdnum_limit(queue, &uplimit);
+
+	param->rx_max_pending = uplimit;
+	param->tx_max_pending = uplimit;
+	param->rx_pending = queue->rx_ring.desc_num;
+	param->tx_pending = queue->tx_ring.desc_num;
+}
+
+/**
+ * hns_get_pauseparam - get pause parameter
+ * @net_dev: net device
+ * @param: pause parameter
+ */
+static void hns_get_pauseparam(struct net_device *net_dev,
+			       struct ethtool_pauseparam *param)
+{
+	struct hns_nic_priv *priv = netdev_priv(net_dev);
+	struct hnae_ae_ops *ops;
+
+	ops = priv->ae_handle->dev->ops;
+
+	if (ops->get_pauseparam)
+		ops->get_pauseparam(priv->ae_handle, &param->autoneg,
+					    &param->rx_pause, &param->tx_pause);
+}
+
+/**
+ * hns_set_pauseparam - set pause parameter
+ * @net_dev: net device
+ * @param: pause parameter
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_set_pauseparam(struct net_device *net_dev,
+			      struct ethtool_pauseparam *param)
+{
+	struct hns_nic_priv *priv = netdev_priv(net_dev);
+	struct hnae_handle *h;
+	struct hnae_ae_ops *ops;
+
+	assert(priv && priv->ae_handle);
+
+	h = priv->ae_handle;
+	ops = h->dev->ops;
+
+	if (!ops->set_pauseparam)
+		return -ESRCH;
+
+	return ops->set_pauseparam(priv->ae_handle, param->autoneg,
+				   param->rx_pause, param->tx_pause);
+}
+
+/**
+ * hns_get_coalesce - get coalesce info.
+ * @net_dev: net device
+ * @ec: coalesce info.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_get_coalesce(struct net_device *net_dev,
+			    struct ethtool_coalesce *ec)
+{
+	struct hns_nic_priv *priv = netdev_priv(net_dev);
+	struct hnae_ae_ops *ops;
+
+	ops = priv->ae_handle->dev->ops;
+
+	ec->use_adaptive_rx_coalesce = 1;
+	ec->use_adaptive_tx_coalesce = 1;
+
+	if ((!ops->get_coalesce_usecs) ||
+	    (!ops->get_rx_max_coalesced_frames))
+		return -ESRCH;
+
+	ops->get_coalesce_usecs(priv->ae_handle,
+					&ec->tx_coalesce_usecs,
+					&ec->rx_coalesce_usecs);
+
+	ops->get_rx_max_coalesced_frames(
+		priv->ae_handle,
+		&ec->tx_max_coalesced_frames,
+		&ec->rx_max_coalesced_frames);
+
+	return 0;
+}
+
+/**
+ * hns_set_coalesce - set coalesce info.
+ * @net_dev: net device
+ * @ec: coalesce info.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_set_coalesce(struct net_device *net_dev,
+			    struct ethtool_coalesce *ec)
+{
+	struct hns_nic_priv *priv = netdev_priv(net_dev);
+	struct hnae_ae_ops *ops;
+	int ret;
+
+	assert(priv && priv->ae_handle);
+
+	ops = priv->ae_handle->dev->ops;
+
+	if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
+		return -EINVAL;
+
+	if (ec->rx_max_coalesced_frames != ec->tx_max_coalesced_frames)
+		return -EINVAL;
+
+	if ((!ops->set_coalesce_usecs) ||
+	    (!ops->set_coalesce_frames))
+		return -ESRCH;
+
+	ops->set_coalesce_usecs(priv->ae_handle,
+					ec->rx_coalesce_usecs);
+
+	ret = ops->set_coalesce_frames(
+		priv->ae_handle,
+		ec->rx_max_coalesced_frames);
+
+	return ret;
+}
+
+/**
+ * hns_get_channels - get channel info.
+ * @net_dev: net device
+ * @ch: channel info.
+ */
+void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
+{
+	struct hns_nic_priv *priv = netdev_priv(net_dev);
+
+	ch->max_rx = priv->ae_handle->q_num;
+	ch->max_tx = priv->ae_handle->q_num;
+
+	ch->rx_count = priv->ae_handle->q_num;
+	ch->tx_count = priv->ae_handle->q_num;
+}
+
+/**
+ * hns_get_ethtool_stats - get detailed statistics.
+ * @netdev: net device
+ * @stats: statistics info.
+ * @data: statistics data.
+ */
+void hns_get_ethtool_stats(struct net_device *netdev,
+			   struct ethtool_stats *stats, u64 *data)
+{
+	u64 *p = data;
+	struct hns_nic_priv *priv = netdev_priv(netdev);
+	struct hnae_handle *h = priv->ae_handle;
+	const struct rtnl_link_stats64 *net_stats;
+	struct rtnl_link_stats64 temp;
+
+	if (!h->dev->ops->get_stats || !h->dev->ops->update_stats) {
+		netdev_err(netdev, "get_stats or update_stats is null!\n");
+		return;
+	}
+
+	h->dev->ops->update_stats(h, &netdev->stats);
+
+	net_stats = dev_get_stats(netdev, &temp);
+
+	/* get netdev statistics */
+	p[0] = net_stats->rx_packets;
+	p[1] = net_stats->tx_packets;
+	p[2] = net_stats->rx_bytes;
+	p[3] = net_stats->tx_bytes;
+	p[4] = net_stats->rx_errors;
+	p[5] = net_stats->tx_errors;
+	p[6] = net_stats->rx_dropped;
+	p[7] = net_stats->tx_dropped;
+	p[8] = net_stats->multicast;
+	p[9] = net_stats->collisions;
+	p[10] = net_stats->rx_over_errors;
+	p[11] = net_stats->rx_crc_errors;
+	p[12] = net_stats->rx_frame_errors;
+	p[13] = net_stats->rx_fifo_errors;
+	p[14] = net_stats->rx_missed_errors;
+	p[15] = net_stats->tx_aborted_errors;
+	p[16] = net_stats->tx_carrier_errors;
+	p[17] = net_stats->tx_fifo_errors;
+	p[18] = net_stats->tx_heartbeat_errors;
+	p[19] = net_stats->rx_length_errors;
+	p[20] = net_stats->tx_window_errors;
+	p[21] = net_stats->rx_compressed;
+	p[22] = net_stats->tx_compressed;
+
+	p[23] = netdev->rx_dropped.counter;
+	p[24] = netdev->tx_dropped.counter;
+
+	p[25] = priv->tx_timeout_count;
+
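+	/* p[0]..p[25] cover the HNS_NET_STATS_CNT (26) generic counters named
+	 * by hns_get_strings(); driver-private stats follow them.
+	 */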
+	/* get driver statistics */
+	h->dev->ops->get_stats(h, &p[26]);
+}
+
+/**
+ * hns_get_strings - return a set of strings that describe the requested objects
+ * @netdev: net device
+ * @stringset: string set ID.
+ * @data: objects data.
+ */
+void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+	struct hns_nic_priv *priv = netdev_priv(netdev);
+	struct hnae_handle *h = priv->ae_handle;
+	char *buff = (char *)data;
+
+	if (!h->dev->ops->get_strings) {
+		netdev_err(netdev, "h->dev->ops->get_strings is null!\n");
+		return;
+	}
+
+	if (stringset == ETH_SS_TEST) {
+		if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) {
+			memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_MAC],
+			       ETH_GSTRING_LEN);
+			buff += ETH_GSTRING_LEN;
+		}
+		memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES],
+		       ETH_GSTRING_LEN);
+		buff += ETH_GSTRING_LEN;
+		if ((priv->phy) && (!priv->phy->is_c45))
+			memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_PHY],
+			       ETH_GSTRING_LEN);
+
+	} else {
+		snprintf(buff, ETH_GSTRING_LEN, "rx_packets");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "tx_packets");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "rx_bytes");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "tx_bytes");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "rx_errors");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "tx_errors");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "rx_dropped");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "tx_dropped");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "multicast");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "collisions");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "rx_over_errors");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "rx_crc_errors");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "rx_frame_errors");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "rx_fifo_errors");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "rx_missed_errors");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "tx_aborted_errors");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "tx_carrier_errors");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "tx_fifo_errors");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "tx_heartbeat_errors");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "rx_length_errors");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "tx_window_errors");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "rx_compressed");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "tx_compressed");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "netdev_rx_dropped");
+		buff = buff + ETH_GSTRING_LEN;
+		snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_dropped");
+		buff = buff + ETH_GSTRING_LEN;
+
+		snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_timeout");
+		buff = buff + ETH_GSTRING_LEN;
+
+		h->dev->ops->get_strings(h, stringset, (u8 *)buff);
+	}
+}
+
+/**
+ * hns_get_sset_count - get the count of strings returned by hns_get_strings.
+ * @netdev: net device
+ * @stringset: string set index, 0: self test strings; 1: statistics strings.
+ *
+ * Return string set count.
+ */
+int hns_get_sset_count(struct net_device *netdev, int stringset)
+{
+	struct hns_nic_priv *priv = netdev_priv(netdev);
+	struct hnae_handle *h = priv->ae_handle;
+	struct hnae_ae_ops *ops = h->dev->ops;
+
+	if (!ops->get_sset_count) {
+		netdev_err(netdev, "get_sset_count is null!\n");
+		return -EOPNOTSUPP;
+	}
+	if (stringset == ETH_SS_TEST) {
+		u32 cnt = (sizeof(hns_nic_test_strs) / ETH_GSTRING_LEN);
+
+		if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII)
+			cnt--;
+
+		if ((!priv->phy) || (priv->phy->is_c45))
+			cnt--;
+
+		return cnt;
+	} else {
+		return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
+	}
+}
+
+/**
+ * hns_phy_led_set - set phy LED status.
+ * @netdev: net device
+ * @value: LED state.
+ *
+ * Return 0 on success, negative on failure.
+ */
+int hns_phy_led_set(struct net_device *netdev, int value)
+{
+	int retval;
+	struct hns_nic_priv *priv = netdev_priv(netdev);
+	struct phy_device *phy_dev = priv->phy;
+
+	if (!phy_dev->bus) {
+		netdev_err(netdev, "phy_dev->bus is null!\n");
+		return -EINVAL;
+	}
+	retval = mdiobus_write(phy_dev->bus, phy_dev->addr,
+			       HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
+	retval = mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_LED_FC_REG,
+			       value);
+	retval = mdiobus_write(phy_dev->bus, phy_dev->addr,
+			       HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
+	if (retval) {
+		netdev_err(netdev, "mdiobus_write fail !\n");
+		return retval;
+	}
+	return 0;
+}
+
+/**
+ * hns_set_phys_id - set phy identify LED.
+ * @netdev: net device
+ * @state: LED state.
+ *
+ * Return 0 on success, negative on failure.
+ */
+int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+{
+	struct hns_nic_priv *priv = netdev_priv(netdev);
+	struct hnae_handle *h = priv->ae_handle;
+	struct phy_device *phy_dev = priv->phy;
+	int ret;
+
+	if (phy_dev)
+		switch (state) {
+		case ETHTOOL_ID_ACTIVE:
+			ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+					    HNS_PHY_PAGE_REG,
+					    HNS_PHY_PAGE_LED);
+			if (ret)
+				return ret;
+
+			priv->phy_led_val = (u16)mdiobus_read(phy_dev->bus,
+							      phy_dev->addr,
+							      HNS_LED_FC_REG);
+
+			ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+					    HNS_PHY_PAGE_REG,
+					    HNS_PHY_PAGE_COPPER);
+			if (ret)
+				return ret;
+			return 2;
+		case ETHTOOL_ID_ON:
+			ret = hns_phy_led_set(netdev, HNS_LED_FORCE_ON);
+			if (ret)
+				return ret;
+			break;
+		case ETHTOOL_ID_OFF:
+			ret = hns_phy_led_set(netdev, HNS_LED_FORCE_OFF);
+			if (ret)
+				return ret;
+			break;
+		case ETHTOOL_ID_INACTIVE:
+			ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+					    HNS_PHY_PAGE_REG,
+					    HNS_PHY_PAGE_LED);
+			if (ret)
+				return ret;
+
+			ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+					    HNS_LED_FC_REG, priv->phy_led_val);
+			if (ret)
+				return ret;
+
+			ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+					    HNS_PHY_PAGE_REG,
+					    HNS_PHY_PAGE_COPPER);
+			if (ret)
+				return ret;
+			break;
+		default:
+			return -EINVAL;
+		}
+	else
+		switch (state) {
+		case ETHTOOL_ID_ACTIVE:
+			return h->dev->ops->set_led_id(h, HNAE_LED_ACTIVE);
+		case ETHTOOL_ID_ON:
+			return h->dev->ops->set_led_id(h, HNAE_LED_ON);
+		case ETHTOOL_ID_OFF:
+			return h->dev->ops->set_led_id(h, HNAE_LED_OFF);
+		case ETHTOOL_ID_INACTIVE:
+			return h->dev->ops->set_led_id(h, HNAE_LED_INACTIVE);
+		default:
+			return -EINVAL;
+		}
+
+	return 0;
+}
+
+/**
+ * hns_get_regs - get net device registers
+ * @net_dev: net device
+ * @cmd: ethtool cmd
+ * @data: register data
+ */
+void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
+		  void *data)
+{
+	struct hns_nic_priv *priv = netdev_priv(net_dev);
+	struct hnae_ae_ops *ops;
+
+	assert(priv && priv->ae_handle);
+
+	ops = priv->ae_handle->dev->ops;
+
+	cmd->version = HNS_CHIP_VERSION;
+	if (!ops->get_regs) {
+		netdev_err(net_dev, "ops->get_regs is null!\n");
+		return;
+	}
+	ops->get_regs(priv->ae_handle, data);
+}
+
+/**
+ * hns_get_regs_len - get the total register length.
+ * @net_dev: net device
+ *
+ * Return total register len.
+ */
+static int hns_get_regs_len(struct net_device *net_dev)
+{
+	u32 reg_num;
+	struct hns_nic_priv *priv = netdev_priv(net_dev);
+	struct hnae_ae_ops *ops;
+
+	assert(priv && priv->ae_handle);
+
+	ops = priv->ae_handle->dev->ops;
+	if (!ops->get_regs_len) {
+		netdev_err(net_dev, "ops->get_regs_len is null!\n");
+		return -EOPNOTSUPP;
+	}
+
+	reg_num = ops->get_regs_len(priv->ae_handle);
+	if (reg_num > 0)
+		return reg_num * sizeof(u32);
+	else
+		return reg_num;	/* error code */
+}
+
+/**
+ * hns_nic_nway_reset - nway reset
+ * @netdev: net device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_nic_nway_reset(struct net_device *netdev)
+{
+	int ret = 0;
+	struct hns_nic_priv *priv = netdev_priv(netdev);
+	struct phy_device *phy = priv->phy;
+
+	if (netif_running(netdev)) {
+		if (phy)
+			ret = genphy_restart_aneg(phy);
+	}
+
+	return ret;
+}
+
+static struct ethtool_ops hns_ethtool_ops = {
+	.get_drvinfo = hns_nic_get_drvinfo,
+	.get_link  = hns_nic_get_link,
+	.get_settings  = hns_nic_get_settings,
+	.set_settings  = hns_nic_set_settings,
+	.get_ringparam = hns_get_ringparam,
+	.get_pauseparam = hns_get_pauseparam,
+	.set_pauseparam = hns_set_pauseparam,
+	.get_coalesce = hns_get_coalesce,
+	.set_coalesce = hns_set_coalesce,
+	.get_channels = hns_get_channels,
+	.self_test = hns_nic_self_test,
+	.get_strings = hns_get_strings,
+	.get_sset_count = hns_get_sset_count,
+	.get_ethtool_stats = hns_get_ethtool_stats,
+	.set_phys_id = hns_set_phys_id,
+	.get_regs_len = hns_get_regs_len,
+	.get_regs = hns_get_regs,
+	.nway_reset = hns_nic_nway_reset,
+};
+
+void hns_ethtool_set_ops(struct net_device *ndev)
+{
+	ndev->ethtool_ops = &hns_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
new file mode 100644
index 0000000..e4ec52a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock_types.h>
+
+#define MDIO_DRV_NAME "Hi-HNS_MDIO"
+#define MDIO_BUS_NAME "Hisilicon MII Bus"
+#define MDIO_DRV_VERSION "1.3.0"
+#define MDIO_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
+#define MDIO_DRV_STRING MDIO_BUS_NAME
+#define MDIO_DEFAULT_DEVICE_DESCR MDIO_BUS_NAME
+
+#define MDIO_CTL_DEV_ADDR(x)	(x & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x)	((x & 0x1f) << 5)
+
+#define MDIO_TIMEOUT			1000000
+
+struct hns_mdio_device {
+	void *vbase;		/* mdio reg base address */
+	void *sys_vbase;
+};
+
+/* mdio reg */
+#define MDIO_COMMAND_REG		0x0
+#define MDIO_ADDR_REG			0x4
+#define MDIO_WDATA_REG			0x8
+#define MDIO_RDATA_REG			0xc
+#define MDIO_STA_REG			0x10
+
+/* cfg phy bit map */
+#define MDIO_CMD_DEVAD_M	0x1f
+#define MDIO_CMD_DEVAD_S	0
+#define MDIO_CMD_PRTAD_M	0x1f
+#define MDIO_CMD_PRTAD_S	5
+#define MDIO_CMD_OP_M		0x3
+#define MDIO_CMD_OP_S		10
+#define MDIO_CMD_ST_M		0x3
+#define MDIO_CMD_ST_S		12
+#define MDIO_CMD_START_B	14
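+
+/* MDIO_COMMAND_REG layout implied by the fields above:
+ * bits [4:0] devad, [9:5] prtad, [11:10] op, [13:12] st, bit [14] start
+ */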
+
+#define MDIO_ADDR_DATA_M	0xffff
+#define MDIO_ADDR_DATA_S	0
+
+#define MDIO_WDATA_DATA_M	0xffff
+#define MDIO_WDATA_DATA_S	0
+
+#define MDIO_RDATA_DATA_M	0xffff
+#define MDIO_RDATA_DATA_S	0
+
+#define MDIO_STATE_STA_B	0
+
+enum mdio_st_clause {
+	MDIO_ST_CLAUSE_45 = 0,
+	MDIO_ST_CLAUSE_22
+};
+
+enum mdio_c22_op_seq {
+	MDIO_C22_WRITE = 1,
+	MDIO_C22_READ = 2
+};
+
+enum mdio_c45_op_seq {
+	MDIO_C45_WRITE_ADDR = 0,
+	MDIO_C45_WRITE_DATA,
+	MDIO_C45_READ_INCREMENT,
+	MDIO_C45_READ
+};
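+
+/* Clause 22 transfers are a single command (MDIO_C22_READ/WRITE), while
+ * clause 45 is two-step: latch the register address with MDIO_C45_WRITE_ADDR,
+ * then issue the data op; this summarises the sequences that
+ * hns_mdio_read()/hns_mdio_write() drive below.
+ */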
+
+/* peri subctrl reg */
+#define MDIO_SC_CLK_EN		0x338
+#define MDIO_SC_CLK_DIS		0x33C
+#define MDIO_SC_RESET_REQ	0xA38
+#define MDIO_SC_RESET_DREQ	0xA3C
+#define MDIO_SC_CTRL		0x2010
+#define MDIO_SC_CLK_ST		0x531C
+#define MDIO_SC_RESET_ST	0x5A1C
+
+static void mdio_write_reg(void *base, u32 reg, u32 value)
+{
+	u8 __iomem *reg_addr = (u8 __iomem *)base;
+
+	writel_relaxed(value, reg_addr + reg);
+}
+
+#define MDIO_WRITE_REG(a, reg, value) \
+	mdio_write_reg((a)->vbase, (reg), (value))
+
+static u32 mdio_read_reg(void *base, u32 reg)
+{
+	u8 __iomem *reg_addr = (u8 __iomem *)base;
+
+	return readl_relaxed(reg_addr + reg);
+}
+
+#define mdio_set_field(origin, mask, shift, val) \
+	do { \
+		(origin) &= (~((mask) << (shift))); \
+		(origin) |= (((val) & (mask)) << (shift)); \
+	} while (0)
+
+#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
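+
+/* e.g. mdio_get_field(cmd, MDIO_CMD_OP_M, MDIO_CMD_OP_S) extracts the 2-bit
+ * op code from a command word, and mdio_set_field(cmd, MDIO_CMD_OP_M,
+ * MDIO_CMD_OP_S, MDIO_C22_READ) writes one back (illustrative use only).
+ */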
+
+static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+			       u32 val)
+{
+	u32 origin = mdio_read_reg(base, reg);
+
+	mdio_set_field(origin, mask, shift, val);
+	mdio_write_reg(base, reg, origin);
+}
+
+#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
+	mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
+
+static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+{
+	u32 origin;
+
+	origin = mdio_read_reg(base, reg);
+	return mdio_get_field(origin, mask, shift);
+}
+
+#define MDIO_GET_REG_FIELD(dev, reg, mask, shift) \
+		mdio_get_reg_field((dev)->vbase, (reg), (mask), (shift))
+
+#define MDIO_GET_REG_BIT(dev, reg, bit) \
+		mdio_get_reg_field((dev)->vbase, (reg), 0x1ull, (bit))
+
+#define MDIO_CHECK_SET_ST	1
+#define MDIO_CHECK_CLR_ST	0
+
+static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
+				 u32 cfg_reg, u32 set_val,
+				 u32 st_reg, u32 st_msk, u8 check_st)
+{
+	u32 time_cnt;
+	u32 reg_value;
+
+	mdio_write_reg((void *)mdio_dev->sys_vbase, cfg_reg, set_val);
+
+	for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
+		reg_value = mdio_read_reg((void *)mdio_dev->sys_vbase, st_reg);
+		reg_value &= st_msk;
+		if ((!!check_st) == (!!reg_value))
+			break;
+	}
+
+	if ((!!check_st) != (!!reg_value))
+		return -EBUSY;
+
+	return 0;
+}
+
+static int hns_mdio_wait_ready(struct mii_bus *bus)
+{
+	struct hns_mdio_device *mdio_dev = bus->priv;
+	int i;
+	u32 cmd_reg_value = 1;
+
+	/* wait for MDIO_COMMAND_REG's mdio_start bit to clear;
+	 * only then can a read or write be issued
+	 */
+	for (i = 0; cmd_reg_value; i++) {
+		cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
+						 MDIO_COMMAND_REG,
+						 MDIO_CMD_START_B);
+		if (i == MDIO_TIMEOUT)
+			return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
+			       u8 is_c45, u8 op, u8 phy_id, u16 cmd)
+{
+	u32 cmd_reg_value;
+	u8 st = is_c45 ? MDIO_ST_CLAUSE_45 : MDIO_ST_CLAUSE_22;
+
+	cmd_reg_value = st << MDIO_CMD_ST_S;
+	cmd_reg_value |= op << MDIO_CMD_OP_S;
+	cmd_reg_value |=
+		(phy_id & MDIO_CMD_PRTAD_M) << MDIO_CMD_PRTAD_S;
+	cmd_reg_value |= (cmd & MDIO_CMD_DEVAD_M) << MDIO_CMD_DEVAD_S;
+	cmd_reg_value |= 1 << MDIO_CMD_START_B;
+
+	MDIO_WRITE_REG(mdio_dev, MDIO_COMMAND_REG, cmd_reg_value);
+}
+
+/**
+ * hns_mdio_write - write a phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @regnum: register num
+ * @data: value to write
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_write(struct mii_bus *bus,
+			  int phy_id, int regnum, u16 data)
+{
+	int ret;
+	struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+	u8 devad = ((regnum >> 16) & 0x1f);
+	u8 is_c45 = !!(regnum & MII_ADDR_C45);
+	u16 reg = (u16)(regnum & 0xffff);
+	u8 op;
+	u16 cmd_reg_cfg;
+
+	dev_dbg(&bus->dev, "mdio write %s,base is %p\n",
+		bus->id, mdio_dev->vbase);
+	dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x, write data=%d\n",
+		phy_id, is_c45, devad, reg, data);
+
+	/* wait for ready */
+	ret = hns_mdio_wait_ready(bus);
+	if (ret) {
+		dev_err(&bus->dev, "MDIO bus is busy\n");
+		return ret;
+	}
+
+	if (!is_c45) {
+		cmd_reg_cfg = reg;
+		op = MDIO_C22_WRITE;
+	} else {
+		/* config the cmd-reg to write addr*/
+		MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+				   MDIO_ADDR_DATA_S, reg);
+
+		hns_mdio_cmd_write(mdio_dev, is_c45,
+				   MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+		/* wait until the address write has finished */
+		ret = hns_mdio_wait_ready(bus);
+		if (ret) {
+			dev_err(&bus->dev, "MDIO bus is busy\n");
+			return ret;
+		}
+
+		/* config the data to be written */
+		cmd_reg_cfg = devad;
+		op = MDIO_C45_WRITE_DATA;
+	}
+
+	MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
+			   MDIO_WDATA_DATA_S, data);
+
+	hns_mdio_cmd_write(mdio_dev, is_c45, op, phy_id, cmd_reg_cfg);
+
+	return 0;
+}
+
+/**
+ * hns_mdio_read - read a phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @regnum: register num
+ *
+ * Return the phy register value on success, negative on failure
+ */
+static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+	int ret;
+	u16 reg_val = 0;
+	u8 devad = ((regnum >> 16) & 0x1f);
+	u8 is_c45 = !!(regnum & MII_ADDR_C45);
+	u16 reg = (u16)(regnum & 0xffff);
+	struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+
+	dev_dbg(&bus->dev, "mdio read %s,base is %p\n",
+		bus->id, mdio_dev->vbase);
+	dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x!\n",
+		phy_id, is_c45, devad, reg);
+
+	/* Step 1: wait for ready */
+	ret = hns_mdio_wait_ready(bus);
+	if (ret) {
+		dev_err(&bus->dev, "MDIO bus is busy\n");
+		return ret;
+	}
+
+	if (!is_c45) {
+		hns_mdio_cmd_write(mdio_dev, is_c45,
+				   MDIO_C22_READ, phy_id, reg);
+	} else {
+		MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+				   MDIO_ADDR_DATA_S, reg);
+
+		/* Step 2: config the cmd-reg to write the address */
+		hns_mdio_cmd_write(mdio_dev, is_c45,
+				   MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+		/* Step 3: wait until the address write has finished */
+		ret = hns_mdio_wait_ready(bus);
+		if (ret) {
+			dev_err(&bus->dev, "MDIO bus is busy\n");
+			return ret;
+		}
+
+		/* Step 4: issue the read op for the latched address */
+		hns_mdio_cmd_write(mdio_dev, is_c45,
+				   MDIO_C45_READ, phy_id, devad);
+	}
+
+	/* Step 5: wait for MDIO_COMMAND_REG's mdio_start bit to clear,
+	 * i.e. the read or write op has finished
+	 */
+	ret = hns_mdio_wait_ready(bus);
+	if (ret) {
+		dev_err(&bus->dev, "MDIO bus is busy\n");
+		return ret;
+	}
+
+	reg_val = MDIO_GET_REG_BIT(mdio_dev, MDIO_STA_REG, MDIO_STATE_STA_B);
+	if (reg_val) {
+		dev_err(&bus->dev, "MDIO read failed!\n");
+		return -EBUSY;
+	}
+
+	/* Step 6: read out the data */
+	reg_val = (u16)MDIO_GET_REG_FIELD(mdio_dev, MDIO_RDATA_REG,
+					  MDIO_RDATA_DATA_M, MDIO_RDATA_DATA_S);
+
+	return reg_val;
+}
+
+/**
+ * hns_mdio_reset - reset mdio bus
+ * @bus: mdio bus
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_reset(struct mii_bus *bus)
+{
+	struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+	int ret;
+
+	if (!mdio_dev->sys_vbase) {
+		dev_err(&bus->dev, "mdio sys ctrl reg has not been mapped\n");
+		return -ENODEV;
+	}
+
+	/* 1. assert reset request, then poll the reset status */
+	ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1,
+				    MDIO_SC_RESET_ST, 0x1,
+				    MDIO_CHECK_SET_ST);
+	if (ret) {
+		dev_err(&bus->dev, "MDIO reset fail\n");
+		return ret;
+	}
+
+	/* 2. disable the clock, then poll the clock status */
+	ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS,
+				    0x1, MDIO_SC_CLK_ST, 0x1,
+				    MDIO_CHECK_CLR_ST);
+	if (ret) {
+		dev_err(&bus->dev, "MDIO dis clk fail\n");
+		return ret;
+	}
+
+	/* 3. de-assert reset request, then poll the reset status */
+	ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1,
+				    MDIO_SC_RESET_ST, 0x1,
+				    MDIO_CHECK_CLR_ST);
+	if (ret) {
+		dev_err(&bus->dev, "MDIO dis clk fail\n");
+		return ret;
+	}
+
+	/* 4. enable the clock, then check the clock status */
+	ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN,
+				    0x1, MDIO_SC_CLK_ST, 0x1,
+				    MDIO_CHECK_SET_ST);
+	if (ret)
+		dev_err(&bus->dev, "MDIO en clk fail\n");
+
+	return ret;
+}
+
+/**
+ * hns_mdio_bus_name - get mdio bus name
+ * @name: mdio bus name
+ * @np: mdio device node pointer
+ */
+static void hns_mdio_bus_name(char *name, struct device_node *np)
+{
+	const u32 *addr;
+	u64 taddr = OF_BAD_ADDR;
+
+	addr = of_get_address(np, 0, NULL, NULL);
+	if (addr)
+		taddr = of_translate_address(np, addr);
+
+	snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
+		 (unsigned long long)taddr);
+}
+
+/**
+ * hns_mdio_probe - probe mdio device
+ * @pdev: mdio platform device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_probe(struct platform_device *pdev)
+{
+	struct device_node *np;
+	struct hns_mdio_device *mdio_dev;
+	struct mii_bus *new_bus;
+	struct resource *res;
+	int ret;
+
+	if (!pdev) {
+		dev_err(NULL, "pdev is NULL!\r\n");
+		return -ENODEV;
+	}
+	np = pdev->dev.of_node;
+	mdio_dev = devm_kzalloc(&pdev->dev, sizeof(*mdio_dev), GFP_KERNEL);
+	if (!mdio_dev)
+		return -ENOMEM;
+
+	new_bus = devm_mdiobus_alloc(&pdev->dev);
+	if (!new_bus) {
+		dev_err(&pdev->dev, "mdiobus_alloc fail!\n");
+		return -ENOMEM;
+	}
+
+	new_bus->name = MDIO_BUS_NAME;
+	new_bus->read = hns_mdio_read;
+	new_bus->write = hns_mdio_write;
+	new_bus->reset = hns_mdio_reset;
+	new_bus->priv = mdio_dev;
+	hns_mdio_bus_name(new_bus->id, np);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mdio_dev->vbase = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(mdio_dev->vbase)) {
+		ret = PTR_ERR(mdio_dev->vbase);
+		return ret;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	mdio_dev->sys_vbase = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(mdio_dev->sys_vbase)) {
+		ret = PTR_ERR(mdio_dev->sys_vbase);
+		return ret;
+	}
+
+	new_bus->irq = devm_kcalloc(&pdev->dev, PHY_MAX_ADDR,
+				    sizeof(int), GFP_KERNEL);
+	if (!new_bus->irq)
+		return -ENOMEM;
+
+	new_bus->parent = &pdev->dev;
+	platform_set_drvdata(pdev, new_bus);
+
+	ret = of_mdiobus_register(new_bus, np);
+	if (ret) {
+		dev_err(&pdev->dev, "Cannot register as MDIO bus!\n");
+		platform_set_drvdata(pdev, NULL);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * hns_mdio_remove - remove mdio device
+ * @pdev: mdio platform device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_remove(struct platform_device *pdev)
+{
+	struct mii_bus *bus;
+
+	bus = platform_get_drvdata(pdev);
+
+	mdiobus_unregister(bus);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct of_device_id hns_mdio_match[] = {
+	{.compatible = "hisilicon,mdio"},
+	{.compatible = "hisilicon,hns-mdio"},
+	{}
+};
+
+static struct platform_driver hns_mdio_driver = {
+	.probe = hns_mdio_probe,
+	.remove = hns_mdio_remove,
+	.driver = {
+		   .name = MDIO_DRV_NAME,
+		   .of_match_table = hns_mdio_match,
+		   },
+};
+
+module_platform_driver(hns_mdio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("Hisilicon HNS MDIO driver");
+MODULE_ALIAS("platform:" MDIO_DRV_NAME);
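
A note for readers following the clause-45 branch above: the single regnum that hns_mdio_read()/hns_mdio_write() decode is built by the MDIO core from a device-address/register pair. The stand-alone sketch below mirrors that encode/decode; it is illustrative only and assumes the usual MII_ADDR_C45 flag value of (1 << 30) from include/uapi/linux/mdio.h.

#include <stdint.h>
#include <stdio.h>

#define MII_ADDR_C45 (1u << 30)	/* assumed flag used by the MDIO core for clause-45 access */

/* encode a clause-45 (devad, reg) pair into the regnum decoded by the driver */
static uint32_t c45_regnum(uint8_t devad, uint16_t reg)
{
	return MII_ADDR_C45 | ((uint32_t)(devad & 0x1f) << 16) | reg;
}

int main(void)
{
	uint32_t regnum = c45_regnum(1 /* e.g. PMA/PMD */, 0x0000);

	/* mirror of the decode at the top of hns_mdio_read()/hns_mdio_write() */
	uint8_t devad = (regnum >> 16) & 0x1f;
	int is_c45 = !!(regnum & MII_ADDR_C45);
	uint16_t reg = regnum & 0xffff;

	printf("is_c45=%d devad=%u reg=%#x\n", is_c45, (unsigned)devad, (unsigned)reg);
	return 0;
}
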
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 28df374..93ae114 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -181,7 +181,7 @@
 	struct mal_commac		commac;
 
 	/* PHY infos */
-	u32				phy_mode;
+	int				phy_mode;
 	u32				phy_map;
 	u32				phy_address;
 	u32				phy_feat_exc;
@@ -460,8 +460,8 @@
 	u32 index;
 };
 
-#define EMAC_ETHTOOL_REGS_VER		0
-#define EMAC4_ETHTOOL_REGS_VER		1
-#define EMAC4SYNC_ETHTOOL_REGS_VER	2
+#define EMAC_ETHTOOL_REGS_VER		3
+#define EMAC4_ETHTOOL_REGS_VER		4
+#define EMAC4SYNC_ETHTOOL_REGS_VER	5
 
 #endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 45c8c864..b1af0d6 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -3900,10 +3900,6 @@
 		return E1000_SUCCESS;
 	}
 
-	/* If eeprom is not yet detected, do so now */
-	if (eeprom->word_size == 0)
-		e1000_init_eeprom_params(hw);
-
 	/* A check for invalid values:  offset too large, too many words, and
 	 * not enough words.
 	 */
@@ -4074,10 +4070,6 @@
 		return E1000_SUCCESS;
 	}
 
-	/* If eeprom is not yet detected, do so now */
-	if (eeprom->word_size == 0)
-		e1000_init_eeprom_params(hw);
-
 	/* A check for invalid values:  offset too large, too many words, and
 	 * not enough words.
 	 */
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index faf4b3f..2e2ddec0 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6952,6 +6952,7 @@
 #endif
 	.ndo_set_features = e1000_set_features,
 	.ndo_fix_features = e1000_fix_features,
+	.ndo_features_check	= passthru_features_check,
 };
 
 /**
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index c8c8c5b..1444020 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -101,12 +101,19 @@
 	u64 csum_err;
 	u64 tx_busy;
 	u64 tx_done_old;
+	u64 csum_good;
 };
 
 struct fm10k_rx_queue_stats {
 	u64 alloc_failed;
 	u64 csum_err;
 	u64 errors;
+	u64 csum_good;
+	u64 switch_errors;
+	u64 drops;
+	u64 pp_errors;
+	u64 link_errors;
+	u64 length_errors;
 };
 
 struct fm10k_ring {
@@ -251,6 +258,7 @@
 #define FM10K_FLAG_RSS_FIELD_IPV6_UDP		(u32)(1 << 2)
 #define FM10K_FLAG_RX_TS_ENABLED		(u32)(1 << 3)
 #define FM10K_FLAG_SWPRI_CONFIG			(u32)(1 << 4)
+#define FM10K_FLAG_DEBUG_STATS			(u32)(1 << 5)
 	int xcast_mode;
 
 	/* Tx fast path data */
@@ -277,6 +285,17 @@
 	u64 rx_drops_nic;
 	u64 rx_overrun_pf;
 	u64 rx_overrun_vf;
+
+	/* Debug Statistics */
+	u64 hw_sm_mbx_full;
+	u64 hw_csum_tx_good;
+	u64 hw_csum_rx_good;
+	u64 rx_switch_errors;
+	u64 rx_drops;
+	u64 rx_pp_errors;
+	u64 rx_link_errors;
+	u64 rx_length_errors;
+
 	u32 tx_timeout_count;
 
 	/* RX */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index c6dc968..4ef2fbd 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -76,19 +76,22 @@
 	FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
 	FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
 
-	FM10K_STAT("mbx_tx_busy", hw.mbx.tx_busy),
-	FM10K_STAT("mbx_tx_oversized", hw.mbx.tx_dropped),
-	FM10K_STAT("mbx_tx_messages", hw.mbx.tx_messages),
-	FM10K_STAT("mbx_tx_dwords", hw.mbx.tx_dwords),
-	FM10K_STAT("mbx_rx_messages", hw.mbx.rx_messages),
-	FM10K_STAT("mbx_rx_dwords", hw.mbx.rx_dwords),
-	FM10K_STAT("mbx_rx_parse_err", hw.mbx.rx_parse_err),
-
 	FM10K_STAT("tx_hang_count", tx_timeout_count),
 
 	FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
 };
 
+static const struct fm10k_stats fm10k_gstrings_debug_stats[] = {
+	FM10K_STAT("hw_sm_mbx_full", hw_sm_mbx_full),
+	FM10K_STAT("hw_csum_tx_good", hw_csum_tx_good),
+	FM10K_STAT("hw_csum_rx_good", hw_csum_rx_good),
+	FM10K_STAT("rx_switch_errors", rx_switch_errors),
+	FM10K_STAT("rx_drops", rx_drops),
+	FM10K_STAT("rx_pp_errors", rx_pp_errors),
+	FM10K_STAT("rx_link_errors", rx_link_errors),
+	FM10K_STAT("rx_length_errors", rx_length_errors),
+};
+
 static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
 	FM10K_STAT("timeout", stats.timeout.count),
 	FM10K_STAT("ur", stats.ur.count),
@@ -100,14 +103,33 @@
 	FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
 };
 
+#define FM10K_MBX_STAT(_name, _stat) { \
+	.stat_string = _name, \
+	.sizeof_stat = FIELD_SIZEOF(struct fm10k_mbx_info, _stat), \
+	.stat_offset = offsetof(struct fm10k_mbx_info, _stat) \
+}
+
+static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
+	FM10K_MBX_STAT("mbx_tx_busy", tx_busy),
+	FM10K_MBX_STAT("mbx_tx_oversized", tx_dropped),
+	FM10K_MBX_STAT("mbx_tx_messages", tx_messages),
+	FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords),
+	FM10K_MBX_STAT("mbx_rx_messages", rx_messages),
+	FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords),
+	FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err),
+};
+
 #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
+#define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats)
 #define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
+#define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
 
 #define FM10K_QUEUE_STATS_LEN(_n) \
 	( (_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))
 
 #define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
-				FM10K_NETDEV_STATS_LEN)
+				FM10K_NETDEV_STATS_LEN + \
+				FM10K_MBX_STATS_LEN)
 
 static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Mailbox test (on/offline)"
@@ -120,11 +142,85 @@
 	FM10K_TEST_MAX = FM10K_TEST_LEN
 };
 
-static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+enum {
+	FM10K_PRV_FLAG_DEBUG_STATS,
+	FM10K_PRV_FLAG_LEN,
+};
+
+static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
+	"debug-statistics",
+};
+
+static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
 {
 	struct fm10k_intfc *interface = netdev_priv(dev);
+	struct fm10k_iov_data *iov_data = interface->iov_data;
 	char *p = (char *)data;
 	unsigned int i;
+	unsigned int j;
+
+	for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
+		memcpy(p, fm10k_gstrings_net_stats[i].stat_string,
+		       ETH_GSTRING_LEN);
+		p += ETH_GSTRING_LEN;
+	}
+
+	for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
+		memcpy(p, fm10k_gstrings_global_stats[i].stat_string,
+		       ETH_GSTRING_LEN);
+		p += ETH_GSTRING_LEN;
+	}
+
+	if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
+		for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
+			memcpy(p, fm10k_gstrings_debug_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+	}
+
+	for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
+		memcpy(p, fm10k_gstrings_mbx_stats[i].stat_string,
+		       ETH_GSTRING_LEN);
+		p += ETH_GSTRING_LEN;
+	}
+
+	if (interface->hw.mac.type != fm10k_mac_vf) {
+		for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
+			memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+	}
+
+	if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
+		for (i = 0; i < iov_data->num_vfs; i++) {
+			for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
+				snprintf(p,
+					 ETH_GSTRING_LEN,
+					 "vf_%u_%s", i,
+					 fm10k_gstrings_mbx_stats[j].stat_string);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+	}
+
+	for (i = 0; i < interface->hw.mac.max_queues; i++) {
+		sprintf(p, "tx_queue_%u_packets", i);
+		p += ETH_GSTRING_LEN;
+		sprintf(p, "tx_queue_%u_bytes", i);
+		p += ETH_GSTRING_LEN;
+		sprintf(p, "rx_queue_%u_packets", i);
+		p += ETH_GSTRING_LEN;
+		sprintf(p, "rx_queue_%u_bytes", i);
+		p += ETH_GSTRING_LEN;
+	}
+}
+
+static void fm10k_get_strings(struct net_device *dev,
+			      u32 stringset, u8 *data)
+{
+	char *p = (char *)data;
 
 	switch (stringset) {
 	case ETH_SS_TEST:
@@ -132,35 +228,11 @@
 		       FM10K_TEST_LEN * ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:
-		for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
-			memcpy(p, fm10k_gstrings_net_stats[i].stat_string,
-			       ETH_GSTRING_LEN);
-			p += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
-			memcpy(p, fm10k_gstrings_global_stats[i].stat_string,
-			       ETH_GSTRING_LEN);
-			p += ETH_GSTRING_LEN;
-		}
-
-		if (interface->hw.mac.type != fm10k_mac_vf) {
-			for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
-				memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
-				       ETH_GSTRING_LEN);
-				p += ETH_GSTRING_LEN;
-			}
-		}
-
-		for (i = 0; i < interface->hw.mac.max_queues; i++) {
-			sprintf(p, "tx_queue_%u_packets", i);
-			p += ETH_GSTRING_LEN;
-			sprintf(p, "tx_queue_%u_bytes", i);
-			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_packets", i);
-			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_bytes", i);
-			p += ETH_GSTRING_LEN;
-		}
+		fm10k_get_stat_strings(dev, data);
+		break;
+	case ETH_SS_PRIV_FLAGS:
+		memcpy(p, fm10k_prv_flags,
+		       FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN);
 		break;
 	}
 }
@@ -168,6 +240,7 @@
 static int fm10k_get_sset_count(struct net_device *dev, int sset)
 {
 	struct fm10k_intfc *interface = netdev_priv(dev);
+	struct fm10k_iov_data *iov_data = interface->iov_data;
 	struct fm10k_hw *hw = &interface->hw;
 	int stats_len = FM10K_STATIC_STATS_LEN;
 
@@ -180,7 +253,16 @@
 		if (hw->mac.type != fm10k_mac_vf)
 			stats_len += FM10K_PF_STATS_LEN;
 
+		if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
+			stats_len += FM10K_DEBUG_STATS_LEN;
+
+			if (iov_data)
+				stats_len += FM10K_MBX_STATS_LEN * iov_data->num_vfs;
+		}
+
 		return stats_len;
+	case ETH_SS_PRIV_FLAGS:
+		return FM10K_PRV_FLAG_LEN;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -192,6 +274,7 @@
 {
 	const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64);
 	struct fm10k_intfc *interface = netdev_priv(netdev);
+	struct fm10k_iov_data *iov_data = interface->iov_data;
 	struct net_device_stats *net_stats = &netdev->stats;
 	char *p;
 	int i, j;
@@ -211,13 +294,47 @@
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 
-	if (interface->hw.mac.type != fm10k_mac_vf)
+	if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
+		for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
+			p = (char *)interface + fm10k_gstrings_debug_stats[i].stat_offset;
+			*(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat ==
+				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+		}
+	}
+
+	for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
+		p = (char *)&interface->hw.mbx + fm10k_gstrings_mbx_stats[i].stat_offset;
+		*(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat ==
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
+
+	if (interface->hw.mac.type != fm10k_mac_vf) {
 		for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
 			p = (char *)interface +
 			    fm10k_gstrings_pf_stats[i].stat_offset;
 			*(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat ==
 				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 		}
+	}
+
+	if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
+		for (i = 0; i < iov_data->num_vfs; i++) {
+			struct fm10k_vf_info *vf_info;
+			vf_info = &iov_data->vf_info[i];
+
+			/* skip stats if we don't have a vf info */
+			if (!vf_info) {
+				data += FM10K_MBX_STATS_LEN;
+				continue;
+			}
+
+			for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
+				p = (char *)&vf_info->mbx + fm10k_gstrings_mbx_stats[j].stat_offset;
+				*(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat ==
+					     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+			}
+		}
+	}
 
 	for (i = 0; i < interface->hw.mac.max_queues; i++) {
 		struct fm10k_ring *ring;
@@ -881,6 +998,33 @@
 		eth_test->flags |= ETH_TEST_FL_FAILED;
 }
 
+static u32 fm10k_get_priv_flags(struct net_device *netdev)
+{
+	struct fm10k_intfc *interface = netdev_priv(netdev);
+	u32 priv_flags = 0;
+
+	if (interface->flags & FM10K_FLAG_DEBUG_STATS)
+		priv_flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS;
+
+	return priv_flags;
+}
+
+static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+	struct fm10k_intfc *interface = netdev_priv(netdev);
+
+	if (priv_flags >= (1 << FM10K_PRV_FLAG_LEN))
+		return -EINVAL;
+
+	if (priv_flags & (1 << FM10K_PRV_FLAG_DEBUG_STATS))
+		interface->flags |= FM10K_FLAG_DEBUG_STATS;
+	else
+		interface->flags &= ~FM10K_FLAG_DEBUG_STATS;
+
+	return 0;
+}
+
+
 static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
 {
 	return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
@@ -1094,6 +1238,8 @@
 	.get_regs               = fm10k_get_regs,
 	.get_regs_len           = fm10k_get_regs_len,
 	.self_test		= fm10k_self_test,
+	.get_priv_flags		= fm10k_get_priv_flags,
+	.set_priv_flags		= fm10k_set_priv_flags,
 	.get_rxfh_indir_size	= fm10k_get_reta_size,
 	.get_rxfh_key_size	= fm10k_get_rssrk_size,
 	.get_rxfh		= fm10k_get_rssh,
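
The FM10K_STAT/FM10K_MBX_STAT tables above all follow the same pattern: each entry records a name, the field size and the field offset, and fm10k_get_ethtool_stats() walks the table with plain pointer arithmetic. A compressed user-space sketch of that pattern follows; struct mbx_info and its fields are toy stand-ins, not the real fm10k_mbx_info layout.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* toy stand-in for the structure whose fields are exported as stats */
struct mbx_info {
	uint64_t tx_busy;
	uint64_t tx_messages;
	uint32_t rx_parse_err;
};

struct stat_desc {
	const char *name;
	size_t size;	/* what FIELD_SIZEOF() would return */
	size_t offset;	/* what offsetof() returns */
};

#define MBX_STAT(n, f) \
	{ n, sizeof(((struct mbx_info *)0)->f), offsetof(struct mbx_info, f) }

static const struct stat_desc mbx_stats[] = {
	MBX_STAT("mbx_tx_busy", tx_busy),
	MBX_STAT("mbx_tx_messages", tx_messages),
	MBX_STAT("mbx_rx_parse_err", rx_parse_err),
};

int main(void)
{
	struct mbx_info mbx = { .tx_busy = 3, .tx_messages = 42, .rx_parse_err = 1 };
	size_t i;

	/* same walk as the mailbox loop in fm10k_get_ethtool_stats() */
	for (i = 0; i < sizeof(mbx_stats) / sizeof(mbx_stats[0]); i++) {
		const char *p = (const char *)&mbx + mbx_stats[i].offset;
		uint64_t val = (mbx_stats[i].size == sizeof(uint64_t)) ?
			       *(const uint64_t *)p : *(const uint32_t *)p;

		printf("%s = %llu\n", mbx_stats[i].name, (unsigned long long)val);
	}
	return 0;
}
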
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 0e25a80..acfb8b1f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -137,8 +137,11 @@
 		}
 
 		/* guarantee we have free space in the SM mailbox */
-		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
+		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
+			/* keep track of how many times this occurs */
+			interface->hw_sm_mbx_full++;
 			break;
+		}
 
 		/* cleanup mailbox and process received messages */
 		mbx->ops.process(hw, mbx);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 92d4155..2f47bfe 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -398,6 +398,8 @@
 		return;
 
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	ring->rx_stats.csum_good++;
 }
 
 #define FM10K_RSS_L4_TYPES_MASK \
@@ -556,6 +558,18 @@
 {
 	if (unlikely((fm10k_test_staterr(rx_desc,
 					 FM10K_RXD_STATUS_RXE)))) {
+#define FM10K_TEST_RXD_BIT(rxd, bit) \
+	((rxd)->w.csum_err & cpu_to_le16(bit))
+		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
+			rx_ring->rx_stats.switch_errors++;
+		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
+			rx_ring->rx_stats.drops++;
+		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
+			rx_ring->rx_stats.pp_errors++;
+		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
+			rx_ring->rx_stats.link_errors++;
+		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
+			rx_ring->rx_stats.length_errors++;
 		dev_kfree_skb_any(skb);
 		rx_ring->rx_stats.errors++;
 		return true;
@@ -881,6 +895,7 @@
 
 	/* update TX checksum flag */
 	first->tx_flags |= FM10K_TX_FLAGS_CSUM;
+	tx_ring->tx_stats.csum_good++;
 
 no_csum:
 	/* populate Tx descriptor header size and mss */
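
The new per-cause RX error accounting above tests individual FM10K_RXD_ERR_* bits in the descriptor's csum_err word and bumps a matching ring counter before the generic errors counter. A stripped-down sketch of that bookkeeping follows; the bit values are copied from the fm10k_type.h hunk later in this patch, the struct and function names are illustrative, and the cpu_to_le16() byte-order handling is left out.

#include <stdint.h>
#include <stdio.h>

/* error bits as defined in the fm10k_type.h hunk below */
#define RXD_ERR_SWITCH_ERROR	0x0001
#define RXD_ERR_NO_DESCRIPTOR	0x0002
#define RXD_ERR_PP_ERROR	0x0004
#define RXD_ERR_SWITCH_READY	0x0008
#define RXD_ERR_TOO_BIG		0x0010

struct rx_stats {
	uint64_t switch_errors, drops, pp_errors, link_errors, length_errors, errors;
};

/* per-cause accounting mirroring the new RXE branch above; the descriptor
 * is reduced to its csum_err word for illustration */
static void count_rx_error(uint16_t csum_err, struct rx_stats *s)
{
	if (csum_err & RXD_ERR_SWITCH_ERROR)
		s->switch_errors++;
	if (csum_err & RXD_ERR_NO_DESCRIPTOR)
		s->drops++;
	if (csum_err & RXD_ERR_PP_ERROR)
		s->pp_errors++;
	if (csum_err & RXD_ERR_SWITCH_READY)
		s->link_errors++;
	if (csum_err & RXD_ERR_TOO_BIG)
		s->length_errors++;
	s->errors++;
}

int main(void)
{
	struct rx_stats s = { 0 };

	count_rx_error(RXD_ERR_TOO_BIG | RXD_ERR_NO_DESCRIPTOR, &s);
	printf("errors=%llu drops=%llu length_errors=%llu\n",
	       (unsigned long long)s.errors, (unsigned long long)s.drops,
	       (unsigned long long)s.length_errors);
	return 0;
}
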
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 1a4b526..af09a1b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -129,8 +129,8 @@
  *  fm10k_fifo_drop_all - Drop all messages in FIFO
  *  @fifo: pointer to FIFO
  *
- *  This function resets the head pointer to drop all messages in the FIFO,
- *  and ensure the FIFO is empty.
+ *  This function resets the head pointer to drop all messages in the FIFO and
+ *  ensure the FIFO is empty.
  **/
 static void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo)
 {
@@ -899,6 +899,27 @@
 }
 
 /**
+ *  fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mailbox header
+ *  @mbx: pointer to mailbox
+ *
+ *  This function creates a fake disconnect header for loading into remote
+ *  mailbox header. The primary purpose is to prevent errors on immediate
+ *  start up after mbx->connect.
+ **/
+static void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx)
+{
+	u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) |
+		  FM10K_MSG_HDR_FIELD_SET(mbx->head, TAIL) |
+		  FM10K_MSG_HDR_FIELD_SET(mbx->tail, HEAD);
+	u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1);
+
+	mbx->mbx_lock |= FM10K_MBX_ACK;
+
+	/* load header to memory to be written */
+	mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC);
+}
+
+/**
  *  fm10k_mbx_create_error_msg - Generate a error message
  *  @mbx: pointer to mailbox
  *  @err: local error encountered
@@ -1046,9 +1067,26 @@
  **/
 static void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx)
 {
+	u16 len, head, ack;
+
 	/* reset our outgoing max size back to Rx limits */
 	mbx->max_size = mbx->rx.size - 1;
 
+	/* update mbx->pulled to account for tail_len and ack */
+	head = FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, HEAD);
+	ack = fm10k_mbx_index_len(mbx, head, mbx->tail);
+	mbx->pulled += mbx->tail_len - ack;
+
+	/* now drop any messages which have started or finished transmitting */
+	while (fm10k_fifo_head_len(&mbx->tx) && mbx->pulled) {
+		len = fm10k_fifo_head_drop(&mbx->tx);
+		mbx->tx_dropped++;
+		if (mbx->pulled >= len)
+			mbx->pulled -= len;
+		else
+			mbx->pulled = 0;
+	}
+
 	/* just do a quick resysnc to start of message */
 	mbx->pushed = 0;
 	mbx->pulled = 0;
@@ -1418,8 +1456,10 @@
 	/* Place mbx in ready to connect state */
 	mbx->state = FM10K_STATE_CONNECT;
 
+	fm10k_mbx_reset_work(mbx);
+
 	/* initialize header of remote mailbox */
-	fm10k_mbx_create_disconnect_hdr(mbx);
+	fm10k_mbx_create_fake_disconnect_hdr(mbx);
 	fm10k_write_reg(hw, mbx->mbmem_reg ^ mbx->mbmem_len, mbx->mbx_hdr);
 
 	/* enable interrupt and notify other party of new message */
@@ -1725,7 +1765,7 @@
 	mbx->state = FM10K_STATE_CLOSED;
 	mbx->remote = 0;
 	fm10k_mbx_reset_work(mbx);
-	fm10k_mbx_update_max_size(mbx, 0);
+	fm10k_fifo_drop_all(&mbx->tx);
 
 	fm10k_write_reg(hw, mbx->mbmem_reg, 0);
 }
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 3d71c52..74be792 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -274,8 +274,6 @@
  * @interface: board private structure
  *
  * This function will process both the upstream and downstream mailboxes.
- * It is necessary for us to hold the rtnl_lock while doing this as the
- * mailbox accesses are protected by this lock.
  **/
 static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
 {
@@ -330,6 +328,9 @@
 {
 	struct net_device_stats *net_stats = &interface->netdev->stats;
 	struct fm10k_hw *hw = &interface->hw;
+	u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0;
+	u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0;
+	u64 rx_link_errors = 0;
 	u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
 	u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
 	u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
@@ -349,6 +350,7 @@
 		tx_csum_errors += tx_ring->tx_stats.csum_err;
 		bytes += tx_ring->stats.bytes;
 		pkts += tx_ring->stats.packets;
+		hw_csum_tx_good += tx_ring->tx_stats.csum_good;
 	}
 
 	interface->restart_queue = restart_queue;
@@ -356,6 +358,8 @@
 	net_stats->tx_bytes = bytes;
 	net_stats->tx_packets = pkts;
 	interface->tx_csum_errors = tx_csum_errors;
+	interface->hw_csum_tx_good = hw_csum_tx_good;
+
 	/* gather some stats to the interface struct that are per queue */
 	for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
 		struct fm10k_ring *rx_ring = interface->rx_ring[i];
@@ -365,12 +369,24 @@
 		alloc_failed += rx_ring->rx_stats.alloc_failed;
 		rx_csum_errors += rx_ring->rx_stats.csum_err;
 		rx_errors += rx_ring->rx_stats.errors;
+		hw_csum_rx_good += rx_ring->rx_stats.csum_good;
+		rx_switch_errors += rx_ring->rx_stats.switch_errors;
+		rx_drops += rx_ring->rx_stats.drops;
+		rx_pp_errors += rx_ring->rx_stats.pp_errors;
+		rx_link_errors += rx_ring->rx_stats.link_errors;
+		rx_length_errors += rx_ring->rx_stats.length_errors;
 	}
 
 	net_stats->rx_bytes = bytes;
 	net_stats->rx_packets = pkts;
 	interface->alloc_failed = alloc_failed;
 	interface->rx_csum_errors = rx_csum_errors;
+	interface->hw_csum_rx_good = hw_csum_rx_good;
+	interface->rx_switch_errors = rx_switch_errors;
+	interface->rx_drops = rx_drops;
+	interface->rx_pp_errors = rx_pp_errors;
+	interface->rx_link_errors = rx_link_errors;
+	interface->rx_length_errors = rx_length_errors;
 
 	hw->mac.ops.update_hw_stats(hw, &interface->stats);
 
@@ -498,7 +514,7 @@
 
 	interface = container_of(work, struct fm10k_intfc, service_task);
 
-	/* tasks always capable of running, but must be rtnl protected */
+	/* tasks run even when interface is down */
 	fm10k_mbx_subtask(interface);
 	fm10k_detach_subtask(interface);
 	fm10k_reset_subtask(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index bac8d48..318a212 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -762,6 +762,12 @@
 #define FM10K_RXD_STATUS_L4E		0x4000 /* L4 csum error */
 #define FM10K_RXD_STATUS_IPE		0x8000 /* IPv4 csum error */
 
+#define FM10K_RXD_ERR_SWITCH_ERROR	0x0001 /* Switch found bad packet */
+#define FM10K_RXD_ERR_NO_DESCRIPTOR	0x0002 /* No descriptor available */
+#define FM10K_RXD_ERR_PP_ERROR		0x0004 /* RAM error during processing */
+#define FM10K_RXD_ERR_SWITCH_READY	0x0008 /* Link transition mid-packet */
+#define FM10K_RXD_ERR_TOO_BIG		0x0010 /* Pkt too big for single buf */
+
 struct fm10k_ftag {
 	__be16 swpri_type_user;
 	__be16 vlan;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index e746279..f26dcb2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -71,7 +71,6 @@
 #define I40E_MAX_VEB          16
 
 #define I40E_MAX_NUM_DESCRIPTORS      4096
-#define I40E_MAX_REGISTER     0x800000
 #define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
 #define I40E_DEFAULT_NUM_DESCRIPTORS  512
 #define I40E_REQ_DESCRIPTOR_MULTIPLE  32
@@ -98,10 +97,11 @@
 #define I40E_MAX_USER_PRIORITY        8
 #define I40E_DEFAULT_MSG_ENABLE       4
 #define I40E_QUEUE_WAIT_RETRY_LIMIT   10
-#define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 9)
+#define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 16)
 
 /* Ethtool Private Flags */
 #define I40E_PRIV_FLAGS_NPAR_FLAG	BIT(0)
+#define I40E_PRIV_FLAGS_LINKPOLL_FLAG	BIT(1)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -243,7 +243,6 @@
 	struct pci_dev *pdev;
 	struct i40e_hw hw;
 	unsigned long state;
-	unsigned long link_check_timeout;
 	struct msix_entry *msix_entries;
 	bool fc_autoneg_status;
 
@@ -327,7 +326,9 @@
 #define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE	BIT_ULL(33)
 #define I40E_FLAG_128_QP_RSS_CAPABLE		BIT_ULL(34)
 #define I40E_FLAG_WB_ON_ITR_CAPABLE		BIT_ULL(35)
+#define I40E_FLAG_VEB_STATS_ENABLED		BIT_ULL(37)
 #define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE	BIT_ULL(38)
+#define I40E_FLAG_LINK_POLLING_ENABLED		BIT_ULL(39)
 #define I40E_FLAG_VEB_MODE_ENABLED		BIT_ULL(40)
 
 	/* tracks features that get auto disabled by errors */
@@ -409,6 +410,9 @@
 	/* These are only valid in NPAR modes */
 	u32 npar_max_bw;
 	u32 npar_min_bw;
+
+	u32 ioremap_len;
+	u32 fd_inv;
 };
 
 struct i40e_mac_filter {
@@ -474,6 +478,7 @@
 #endif
 	u32 tx_restart;
 	u32 tx_busy;
+	u64 tx_linearize;
 	u32 rx_buf_failed;
 	u32 rx_page_failed;
 
@@ -534,6 +539,7 @@
 	u16 idx;               /* index in pf->vsi[] */
 	u16 veb_idx;           /* index of VEB parent */
 	struct kobject *kobj;  /* sysfs object */
+	bool current_isup;     /* Sync 'link up' logging */
 
 	/* VSI specific handlers */
 	irqreturn_t (*irq_handler)(int irq, void *data);
@@ -667,7 +673,7 @@
 					bool is_vf, bool is_netdev);
 void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
 		     bool is_vf, bool is_netdev);
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl);
 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 				u16 uplink, u32 param1);
 int i40e_vsi_release(struct i40e_vsi *vsi);
@@ -700,7 +706,24 @@
 static inline void i40e_dbg_init(void) {}
 static inline void i40e_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS*/
-void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+/**
+ * i40e_irq_dynamic_enable - Enable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: enable a particular Hw Interrupt vector, without base_vector
+ **/
+static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	u32 val;
+
+	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+	wr32(hw, I40E_PFINT_DYN_CTLN(vector + vsi->base_vector - 1), val);
+	/* skip the flush */
+}
+
 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
@@ -739,7 +762,7 @@
 u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
 void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
 void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
-int i40e_init_pf_fcoe(struct i40e_pf *pf);
+void i40e_init_pf_fcoe(struct i40e_pf *pf);
 int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
 void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
 int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
@@ -771,4 +794,5 @@
 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
 #endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 3e0d200..fa2e916 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -482,8 +482,12 @@
 {
 	i40e_status ret_code = 0;
 
-	if (hw->aq.asq.count == 0)
-		return I40E_ERR_NOT_READY;
+	mutex_lock(&hw->aq.asq_mutex);
+
+	if (hw->aq.asq.count == 0) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto shutdown_asq_out;
+	}
 
 	/* Stop firmware AdminQ processing */
 	wr32(hw, hw->aq.asq.head, 0);
@@ -492,16 +496,13 @@
 	wr32(hw, hw->aq.asq.bal, 0);
 	wr32(hw, hw->aq.asq.bah, 0);
 
-	/* make sure lock is available */
-	mutex_lock(&hw->aq.asq_mutex);
-
 	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
 
 	/* free ring buffers */
 	i40e_free_asq_bufs(hw);
 
+shutdown_asq_out:
 	mutex_unlock(&hw->aq.asq_mutex);
-
 	return ret_code;
 }
 
@@ -515,8 +516,12 @@
 {
 	i40e_status ret_code = 0;
 
-	if (hw->aq.arq.count == 0)
-		return I40E_ERR_NOT_READY;
+	mutex_lock(&hw->aq.arq_mutex);
+
+	if (hw->aq.arq.count == 0) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto shutdown_arq_out;
+	}
 
 	/* Stop firmware AdminQ processing */
 	wr32(hw, hw->aq.arq.head, 0);
@@ -525,16 +530,13 @@
 	wr32(hw, hw->aq.arq.bal, 0);
 	wr32(hw, hw->aq.arq.bah, 0);
 
-	/* make sure lock is available */
-	mutex_lock(&hw->aq.arq_mutex);
-
 	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
 
 	/* free ring buffers */
 	i40e_free_arq_bufs(hw);
 
+shutdown_arq_out:
 	mutex_unlock(&hw->aq.arq_mutex);
-
 	return ret_code;
 }
 
@@ -657,6 +659,9 @@
 
 	/* destroy the locks */
 
+	if (hw->nvm_buff.va)
+		i40e_free_virt_mem(hw, &hw->nvm_buff);
+
 	return ret_code;
 }
 
@@ -678,8 +683,7 @@
 	details = I40E_ADMINQ_DETAILS(*asq, ntc);
 	while (rd32(hw, hw->aq.asq.head) != ntc) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "%s: ntc %d head %d.\n", __func__, ntc,
-			   rd32(hw, hw->aq.asq.head));
+			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
 
 		if (details->callback) {
 			I40E_ADMINQ_CALLBACK cb_func =
@@ -742,19 +746,23 @@
 	u16  retval = 0;
 	u32  val = 0;
 
-	val = rd32(hw, hw->aq.asq.head);
-	if (val >= hw->aq.num_asq_entries) {
-		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: head overrun at %d\n", val);
-		status = I40E_ERR_QUEUE_EMPTY;
-		goto asq_send_command_exit;
-	}
+	mutex_lock(&hw->aq.asq_mutex);
 
 	if (hw->aq.asq.count == 0) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: Admin queue not initialized.\n");
 		status = I40E_ERR_QUEUE_EMPTY;
-		goto asq_send_command_exit;
+		goto asq_send_command_error;
+	}
+
+	hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+	val = rd32(hw, hw->aq.asq.head);
+	if (val >= hw->aq.num_asq_entries) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: head overrun at %d\n", val);
+		status = I40E_ERR_QUEUE_EMPTY;
+		goto asq_send_command_error;
 	}
 
 	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
@@ -779,8 +787,6 @@
 	desc->flags &= ~cpu_to_le16(details->flags_dis);
 	desc->flags |= cpu_to_le16(details->flags_ena);
 
-	mutex_lock(&hw->aq.asq_mutex);
-
 	if (buff_size > hw->aq.asq_buf_size) {
 		i40e_debug(hw,
 			   I40E_DEBUG_AQ_MESSAGE,
@@ -889,6 +895,10 @@
 		   "AQTX: desc and buffer writeback:\n");
 	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
 
+	/* save writeback aq if requested */
+	if (details->wb_desc)
+		*details->wb_desc = *desc_on_ring;
+
 	/* update the error if time out occurred */
 	if ((!cmd_completed) &&
 	    (!details->async && !details->postpone)) {
@@ -900,7 +910,6 @@
 
 asq_send_command_error:
 	mutex_unlock(&hw->aq.asq_mutex);
-asq_send_command_exit:
 	return status;
 }
 
@@ -946,6 +955,13 @@
 	/* take the lock before we start messing with the ring */
 	mutex_lock(&hw->aq.arq_mutex);
 
+	if (hw->aq.arq.count == 0) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQRX: Admin queue not initialized.\n");
+		ret_code = I40E_ERR_QUEUE_EMPTY;
+		goto clean_arq_element_err;
+	}
+
 	/* set next_to_use to head */
 	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
 	if (ntu == ntc) {
@@ -1007,6 +1023,8 @@
 	/* Set pending if needed, unlock and return */
 	if (pending != NULL)
 		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
 	mutex_unlock(&hw->aq.arq_mutex);
 
 	if (i40e_is_nvm_update_op(&e->desc)) {
@@ -1014,6 +1032,19 @@
 			i40e_release_nvm(hw);
 			hw->aq.nvm_release_on_done = false;
 		}
+
+		switch (hw->nvmupd_state) {
+		case I40E_NVMUPD_STATE_INIT_WAIT:
+			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+			break;
+
+		case I40E_NVMUPD_STATE_WRITE_WAIT:
+			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+			break;
+
+		default:
+			break;
+		}
 	}
 
 	return ret_code;
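
The adminq changes above move mutex_lock() ahead of the count == 0 check and route every exit through a single unlock label, so the "is the queue initialized" test can no longer race with another caller tearing down or re-initializing the queue. A minimal pthread sketch of that lock-then-check-then-teardown shape follows (compile with -pthread); the structure and return values are illustrative, not the i40e definitions.

#include <pthread.h>
#include <stdio.h>

/* toy admin-queue state; names are illustrative */
struct adminq {
	pthread_mutex_t lock;
	int count;		/* 0 means "not initialized" */
};

static int adminq_shutdown(struct adminq *aq)
{
	int ret = 0;

	/* take the lock before looking at count, as the patch now does */
	pthread_mutex_lock(&aq->lock);

	if (aq->count == 0) {
		ret = -1;	/* stand-in for I40E_ERR_NOT_READY */
		goto out;
	}

	aq->count = 0;		/* mark the queue uninitialized */
	/* ... free ring buffers here ... */

out:
	pthread_mutex_unlock(&aq->lock);
	return ret;
}

int main(void)
{
	struct adminq aq = { .lock = PTHREAD_MUTEX_INITIALIZER, .count = 4 };

	printf("first shutdown:  %d\n", adminq_shutdown(&aq));	/* 0 */
	printf("second shutdown: %d\n", adminq_shutdown(&aq));	/* -1: already down */
	return 0;
}
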
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 28e519a..12fbbdd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -69,6 +69,7 @@
 	u16 flags_dis;
 	bool async;
 	bool postpone;
+	struct i40e_aq_desc *wb_desc;
 };
 
 #define I40E_ADMINQ_DETAILS(R, i)   \
@@ -108,9 +109,10 @@
 
 /**
  * i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_rc: AdminQ error code to convert
+ * aq_ret: AdminQ handler error code can override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
  **/
-static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
+static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
 {
 	int aq_to_posix[] = {
 		0,           /* I40E_AQ_RC_OK */
@@ -142,8 +144,9 @@
 	if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
 		return -EAGAIN;
 
-	if (aq_rc >= ARRAY_SIZE(aq_to_posix))
+	if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
 		return -ERANGE;
+
 	return aq_to_posix[aq_rc];
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 95d23bf..785b3db 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -2062,6 +2062,7 @@
 #define I40E_AQC_CEE_APP_ISCSI_MASK	(0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
 #define I40E_AQC_CEE_APP_FIP_SHIFT	0x8
 #define I40E_AQC_CEE_APP_FIP_MASK	(0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+
 #define I40E_AQC_CEE_PG_STATUS_SHIFT	0x0
 #define I40E_AQC_CEE_PG_STATUS_MASK	(0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
 #define I40E_AQC_CEE_PFC_STATUS_SHIFT	0x3
@@ -2070,10 +2071,19 @@
 #define I40E_AQC_CEE_APP_STATUS_MASK	(0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
 #define I40E_AQC_CEE_FCOE_STATUS_SHIFT	0x8
 #define I40E_AQC_CEE_FCOE_STATUS_MASK	(0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
-#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT	0xA
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT	0xB
 #define I40E_AQC_CEE_ISCSI_STATUS_MASK	(0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
 #define I40E_AQC_CEE_FIP_STATUS_SHIFT	0x10
 #define I40E_AQC_CEE_FIP_STATUS_MASK	(0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct.  Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
 struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
 	u8	reserved1;
 	u8	oper_num_tc;
@@ -2081,9 +2091,9 @@
 	u8	reserved2;
 	u8	oper_tc_bw[8];
 	u8	oper_pfc_en;
-	u8	reserved3;
+	u8	reserved3[2];
 	__le16	oper_app_prio;
-	u8	reserved4;
+	u8	reserved4[2];
 	__le16	tlv_status;
 };
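
As a quick illustration of the padding the comment above describes: with natural alignment a lone byte in front of a 16-bit field is followed by a hidden pad byte, so the struct the compiler builds is already larger than the one designed on paper. The stand-alone comparison below (plain uintN_t fields instead of the kernel's u8/__le16 types) shows both layouts coming out the same size on common ABIs, which is exactly what widening reserved3/reserved4 makes explicit.

#include <stdint.h>
#include <stdio.h>

/* original declaration: single reserved bytes, compiler inserts padding */
struct cee_cfg_resp_old {
	uint8_t  reserved1;
	uint8_t  oper_num_tc;
	uint8_t  oper_prio_tc[4];
	uint8_t  reserved2;
	uint8_t  oper_tc_bw[8];
	uint8_t  oper_pfc_en;
	uint8_t  reserved3;
	uint16_t oper_app_prio;
	uint8_t  reserved4;
	uint16_t tlv_status;
};

/* revised declaration: reserved3/reserved4 widened, no hidden bytes */
struct cee_cfg_resp_new {
	uint8_t  reserved1;
	uint8_t  oper_num_tc;
	uint8_t  oper_prio_tc[4];
	uint8_t  reserved2;
	uint8_t  oper_tc_bw[8];
	uint8_t  oper_pfc_en;
	uint8_t  reserved3[2];
	uint16_t oper_app_prio;
	uint8_t  reserved4[2];
	uint16_t tlv_status;
};

int main(void)
{
	/* both typically print 24: the "old" struct already carried two
	 * invisible pad bytes, which the "new" one spells out */
	printf("old (implicit padding): %zu\n", sizeof(struct cee_cfg_resp_old));
	printf("new (explicit padding): %zu\n", sizeof(struct cee_cfg_resp_new));
	return 0;
}
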
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 114dc64..2d012d9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -51,7 +51,9 @@
 		case I40E_DEV_ID_QSFP_B:
 		case I40E_DEV_ID_QSFP_C:
 		case I40E_DEV_ID_10G_BASE_T:
+		case I40E_DEV_ID_10G_BASE_T4:
 		case I40E_DEV_ID_20G_KR2:
+		case I40E_DEV_ID_20G_KR2_A:
 			hw->mac.type = I40E_MAC_XL710;
 			break;
 		case I40E_DEV_ID_SFP_X722:
@@ -441,9 +443,6 @@
 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
 
-	cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
-	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
-
 	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
 
 	return status;
@@ -518,8 +517,6 @@
 					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
 					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
 	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
-	cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
-	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
 
 	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
 
@@ -1038,7 +1035,7 @@
 	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
 
 	if (flags & I40E_AQC_LAN_ADDR_VALID)
-		memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+		ether_addr_copy(mac_addr, addrs.pf_lan_mac);
 
 	return status;
 }
@@ -1061,7 +1058,7 @@
 		return status;
 
 	if (flags & I40E_AQC_PORT_ADDR_VALID)
-		memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+		ether_addr_copy(mac_addr, addrs.port_mac);
 	else
 		status = I40E_ERR_INVALID_MAC_ADDR;
 
@@ -1119,7 +1116,7 @@
 		return status;
 
 	if (flags & I40E_AQC_SAN_ADDR_VALID)
-		memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
+		ether_addr_copy(mac_addr, addrs.pf_san_mac);
 	else
 		status = I40E_ERR_INVALID_MAC_ADDR;
 
@@ -1260,7 +1257,7 @@
 	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
 		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
 		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
-	for (cnt = 0; cnt < grst_del + 2; cnt++) {
+	for (cnt = 0; cnt < grst_del + 10; cnt++) {
 		reg = rd32(hw, I40E_GLGEN_RSTAT);
 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
 			break;
@@ -2238,27 +2235,28 @@
 /**
  * i40e_get_link_status - get status of the HW network link
  * @hw: pointer to the hw struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
  *
- * Returns true if link is up, false if link is down.
+ * Sets *link_up to true if the link is up, false if it is down.
+ * The value of *link_up is not valid if the returned status is non-zero.
  *
  * Side effect: LinkStatusEvent reporting becomes enabled
  **/
-bool i40e_get_link_status(struct i40e_hw *hw)
+i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
 {
 	i40e_status status = 0;
-	bool link_status = false;
 
 	if (hw->phy.get_link_info) {
 		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
 
 		if (status)
-			goto i40e_get_link_status_exit;
+			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+				   status);
 	}
 
-	link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
 
-i40e_get_link_status_exit:
-	return link_status;
+	return status;
 }
 
 /**
@@ -2365,6 +2363,7 @@
 		*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
 	if (floating) {
 		u16 flags = le16_to_cpu(cmd_resp->veb_flags);
+
 		if (flags & I40E_AQC_ADD_VEB_FLOATING)
 			*floating = true;
 		else
@@ -3779,7 +3778,7 @@
 	}
 
 	if (mac_addr)
-		memcpy(cmd->mac, mac_addr, ETH_ALEN);
+		ether_addr_copy(cmd->mac, mac_addr);
 
 	cmd->etype = cpu_to_le16(ethtype);
 	cmd->flags = cpu_to_le16(flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 90de46a..6fa07ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -292,6 +292,182 @@
 }
 
 /**
+ * i40e_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ **/
+static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
+				     struct i40e_dcbx_config *dcbcfg)
+{
+	struct i40e_dcb_ets_config *etscfg;
+	u8 *buf = tlv->tlvinfo;
+	u16 offset = 0;
+	u8 priority;
+	int i;
+
+	etscfg = &dcbcfg->etscfg;
+
+	if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+		etscfg->willing = 1;
+
+	etscfg->cbs = 0;
+	/* Priority Group Table (4 octets)
+	 * Octets:|    1    |    2    |    3    |    4    |
+	 *        -----------------------------------------
+	 *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+	 *        -----------------------------------------
+	 *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+	 *        -----------------------------------------
+	 */
+	for (i = 0; i < 4; i++) {
+		priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
+				 I40E_CEE_PGID_PRIO_1_SHIFT);
+		etscfg->prioritytable[i * 2] =  priority;
+		priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
+				 I40E_CEE_PGID_PRIO_0_SHIFT);
+		etscfg->prioritytable[i * 2 + 1] = priority;
+		offset++;
+	}
+
+	/* PG Percentage Table (8 octets)
+	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+	 *        ---------------------------------
+	 *        |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+	 *        ---------------------------------
+	 */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		etscfg->tcbwtable[i] = buf[offset++];
+
+	/* Number of TCs supported (1 octet) */
+	etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * i40e_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ **/
+static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv,
+				      struct i40e_dcbx_config *dcbcfg)
+{
+	u8 *buf = tlv->tlvinfo;
+
+	if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+		dcbcfg->pfc.willing = 1;
+
+	/* ------------------------
+	 * | PFC Enable | PFC TCs |
+	 * ------------------------
+	 * | 1 octet    | 1 octet |
+	 */
+	dcbcfg->pfc.pfcenable = buf[0];
+	dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * i40e_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ **/
+static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
+				   struct i40e_dcbx_config *dcbcfg)
+{
+	u16 length, typelength, offset = 0;
+	struct i40e_cee_app_prio *app;
+	u8 i, up;
+
+	typelength = ntohs(tlv->hdr.typelen);
+	length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+		       I40E_LLDP_TLV_LEN_SHIFT);
+
+	dcbcfg->numapps = length / sizeof(*app);
+	if (!dcbcfg->numapps)
+		return;
+
+	for (i = 0; i < dcbcfg->numapps; i++) {
+		app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
+		for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
+			if (app->prio_map & (1 << up))
+				break;
+		}
+		dcbcfg->app[i].priority = up;
+		/* Get Selector from lower 2 bits */
+		dcbcfg->app[i].selector = (app->upper_oui_sel &
+					   I40E_CEE_APP_SELECTOR_MASK);
+		dcbcfg->app[i].protocolid = ntohs(app->protocol);
+		/* Move to next app */
+		offset += sizeof(*app);
+	}
+}
+
+/**
+ * i40e_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and dispatch it to the matching parsing
+ * function based on the subtype value
+ **/
+static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
+			       struct i40e_dcbx_config *dcbcfg)
+{
+	u16 len, tlvlen, sublen, typelength;
+	struct i40e_cee_feat_tlv *sub_tlv;
+	u8 subtype, feat_tlv_count = 0;
+	u32 ouisubtype;
+
+	ouisubtype = ntohl(tlv->ouisubtype);
+	subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+		       I40E_LLDP_TLV_SUBTYPE_SHIFT);
+	/* Return if not CEE DCBX */
+	if (subtype != I40E_CEE_DCBX_TYPE)
+		return;
+
+	typelength = ntohs(tlv->typelength);
+	tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+			I40E_LLDP_TLV_LEN_SHIFT);
+	len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
+	      sizeof(struct i40e_cee_ctrl_tlv);
+	/* Return if no CEE DCBX Feature TLVs */
+	if (tlvlen <= len)
+		return;
+
+	sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
+	while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
+		typelength = ntohs(sub_tlv->hdr.typelen);
+		sublen = (u16)((typelength &
+				I40E_LLDP_TLV_LEN_MASK) >>
+				I40E_LLDP_TLV_LEN_SHIFT);
+		subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+				I40E_LLDP_TLV_TYPE_SHIFT);
+		switch (subtype) {
+		case I40E_CEE_SUBTYPE_PG_CFG:
+			i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+			break;
+		case I40E_CEE_SUBTYPE_PFC_CFG:
+			i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+			break;
+		case I40E_CEE_SUBTYPE_APP_PRI:
+			i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
+			break;
+		default:
+			return; /* Invalid Sub-type return */
+		}
+		feat_tlv_count++;
+		/* Move to next sub TLV */
+		sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
+						sizeof(sub_tlv->hdr.typelen) +
+						sublen);
+	}
+}
+
+/**
  * i40e_parse_org_tlv
  * @tlv: Organization specific TLV
  * @dcbcfg: Local store to update ETS REC data
@@ -312,6 +488,9 @@
 	case I40E_IEEE_8021QAZ_OUI:
 		i40e_parse_ieee_tlv(tlv, dcbcfg);
 		break;
+	case I40E_CEE_DCBX_OUI:
+		i40e_parse_cee_tlv(tlv, dcbcfg);
+		break;
 	default:
 		break;
 	}
@@ -502,15 +681,18 @@
 	/* CEE PG data to ETS config */
 	dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
 
+	/* Note that the FW creates the oper_prio_tc nibbles reversed
+	 * from those in the CEE Priority Group sub-TLV.
+	 */
 	for (i = 0; i < 4; i++) {
 		tc = (u8)((cee_cfg->oper_prio_tc[i] &
-			 I40E_CEE_PGID_PRIO_1_MASK) >>
-			 I40E_CEE_PGID_PRIO_1_SHIFT);
-		dcbcfg->etscfg.prioritytable[i*2] =  tc;
-		tc = (u8)((cee_cfg->oper_prio_tc[i] &
 			 I40E_CEE_PGID_PRIO_0_MASK) >>
 			 I40E_CEE_PGID_PRIO_0_SHIFT);
-		dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+		dcbcfg->etscfg.prioritytable[i * 2] =  tc;
+		tc = (u8)((cee_cfg->oper_prio_tc[i] &
+			 I40E_CEE_PGID_PRIO_1_MASK) >>
+			 I40E_CEE_PGID_PRIO_1_SHIFT);
+		dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
 	}
 
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
@@ -531,37 +713,85 @@
 	dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
 	dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
 
-	status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
-		  I40E_AQC_CEE_APP_STATUS_SHIFT;
+	i = 0;
+	status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
+		  I40E_AQC_CEE_FCOE_STATUS_SHIFT;
 	err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
 	sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
 	oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
-	/* Add APPs if Error is False and Oper/Sync is True */
+	/* Add FCoE APP if Error is False and Oper/Sync is True */
 	if (!err && sync && oper) {
-		/* CEE operating configuration supports FCoE/iSCSI/FIP only */
-		dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
-
 		/* FCoE APP */
-		dcbcfg->app[0].priority =
+		dcbcfg->app[i].priority =
 			(app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
 			 I40E_AQC_CEE_APP_FCOE_SHIFT;
-		dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
-		dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+		dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+		dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
+		i++;
+	}
 
+	status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
+		  I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+	err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+	sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+	oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+	/* Add iSCSI APP if Error is False and Oper/Sync is True */
+	if (!err && sync && oper) {
 		/* iSCSI APP */
-		dcbcfg->app[1].priority =
+		dcbcfg->app[i].priority =
 			(app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
 			 I40E_AQC_CEE_APP_ISCSI_SHIFT;
-		dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
-		dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+		dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+		dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
+		i++;
+	}
 
+	status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
+		  I40E_AQC_CEE_FIP_STATUS_SHIFT;
+	err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+	sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+	oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+	/* Add FIP APP if Error is False and Oper/Sync is True */
+	if (!err && sync && oper) {
 		/* FIP APP */
-		dcbcfg->app[2].priority =
+		dcbcfg->app[i].priority =
 			(app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
 			 I40E_AQC_CEE_APP_FIP_SHIFT;
-		dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
-		dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+		dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+		dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
+		i++;
 	}
+	dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_get_ieee_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get IEEE mode DCB configuration from the Firmware
+ **/
+static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+{
+	i40e_status ret = 0;
+
+	/* IEEE mode */
+	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+	/* Get Local DCB Config */
+	ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+				     &hw->local_dcbx_config);
+	if (ret)
+		goto out;
+
+	/* Get Remote DCB Config */
+	ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+				     I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+				     &hw->remote_dcbx_config);
+	/* Don't treat ENOENT as an error for Remote MIBs */
+	if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+		ret = 0;
+
+out:
+	return ret;
 }
 
 /**
@@ -579,7 +809,7 @@
 	/* If Firmware version < v4.33 IEEE only */
 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
 	    (hw->aq.fw_maj_ver < 4))
-		goto ieee;
+		return i40e_get_ieee_dcb_config(hw);
 
 	/* If Firmware version == v4.33 use old CEE struct */
 	if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
@@ -608,16 +838,14 @@
 
 	/* CEE mode not enabled try querying IEEE data */
 	if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
-		goto ieee;
-	else
+		return i40e_get_ieee_dcb_config(hw);
+
+	if (ret)
 		goto out;
 
-ieee:
-	/* IEEE mode */
-	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
-	/* Get Local DCB Config */
+	/* Get CEE DCB Desired Config */
 	ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
-				     &hw->local_dcbx_config);
+				     &hw->desired_dcbx_config);
 	if (ret)
 		goto out;
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 50fc894..92d0104 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -44,6 +44,15 @@
 #define I40E_IEEE_SUBTYPE_PFC_CFG	11
 #define I40E_IEEE_SUBTYPE_APP_PRI	12
 
+#define I40E_CEE_DCBX_OUI		0x001b21
+#define I40E_CEE_DCBX_TYPE		2
+
+#define I40E_CEE_SUBTYPE_CTRL		1
+#define I40E_CEE_SUBTYPE_PG_CFG		2
+#define I40E_CEE_SUBTYPE_PFC_CFG	3
+#define I40E_CEE_SUBTYPE_APP_PRI	4
+
+#define I40E_CEE_MAX_FEAT_TYPE		3
 /* Defines for LLDP TLV header */
 #define I40E_LLDP_TLV_LEN_SHIFT		0
 #define I40E_LLDP_TLV_LEN_MASK		(0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
@@ -98,6 +107,36 @@
 	__be32 ouisubtype;
 	u8 tlvinfo[1];
 };
+
+struct i40e_cee_tlv_hdr {
+	__be16 typelen;
+	u8 operver;
+	u8 maxver;
+};
+
+struct i40e_cee_ctrl_tlv {
+	struct i40e_cee_tlv_hdr hdr;
+	__be32 seqno;
+	__be32 ackno;
+};
+
+struct i40e_cee_feat_tlv {
+	struct i40e_cee_tlv_hdr hdr;
+	u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK	0x80
+#define I40E_CEE_FEAT_TLV_WILLING_MASK	0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK	0x20
+	u8 subtype;
+	u8 tlvinfo[1];
+};
+
+struct i40e_cee_app_prio {
+	__be16 protocol;
+	u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define I40E_CEE_APP_SELECTOR_MASK	0x03
+	__be16 lower_oui;
+	u8 prio_map;
+};
 #pragma pack()
 
 i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 1c51f73..7c42d13 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -236,13 +236,13 @@
 			       struct i40e_dcb_app_priority_table *app)
 {
 	int v, err;
+
 	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v] && pf->vsi[v]->netdev) {
 			err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
 			if (err)
-				dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
-					 __func__, pf->vsi[v]->seid,
-					 err, app->selector,
+				dev_info(&pf->pdev->dev, "Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
+					 pf->vsi[v]->seid, err, app->selector,
 					 app->protocolid, app->priority);
 		}
 	}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d7c15d1..c1dd248 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -404,82 +404,82 @@
 	nstat = i40e_get_vsi_stats_struct(vsi);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
-		 (long unsigned int)nstat->rx_packets,
-		 (long unsigned int)nstat->rx_bytes,
-		 (long unsigned int)nstat->rx_errors,
-		 (long unsigned int)nstat->rx_dropped);
+		 (unsigned long int)nstat->rx_packets,
+		 (unsigned long int)nstat->rx_bytes,
+		 (unsigned long int)nstat->rx_errors,
+		 (unsigned long int)nstat->rx_dropped);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
-		 (long unsigned int)nstat->tx_packets,
-		 (long unsigned int)nstat->tx_bytes,
-		 (long unsigned int)nstat->tx_errors,
-		 (long unsigned int)nstat->tx_dropped);
+		 (unsigned long int)nstat->tx_packets,
+		 (unsigned long int)nstat->tx_bytes,
+		 (unsigned long int)nstat->tx_errors,
+		 (unsigned long int)nstat->tx_dropped);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: multicast = %lu, collisions = %lu\n",
-		 (long unsigned int)nstat->multicast,
-		 (long unsigned int)nstat->collisions);
+		 (unsigned long int)nstat->multicast,
+		 (unsigned long int)nstat->collisions);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
-		 (long unsigned int)nstat->rx_length_errors,
-		 (long unsigned int)nstat->rx_over_errors,
-		 (long unsigned int)nstat->rx_crc_errors);
+		 (unsigned long int)nstat->rx_length_errors,
+		 (unsigned long int)nstat->rx_over_errors,
+		 (unsigned long int)nstat->rx_crc_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
-		 (long unsigned int)nstat->rx_frame_errors,
-		 (long unsigned int)nstat->rx_fifo_errors,
-		 (long unsigned int)nstat->rx_missed_errors);
+		 (unsigned long int)nstat->rx_frame_errors,
+		 (unsigned long int)nstat->rx_fifo_errors,
+		 (unsigned long int)nstat->rx_missed_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
-		 (long unsigned int)nstat->tx_aborted_errors,
-		 (long unsigned int)nstat->tx_carrier_errors,
-		 (long unsigned int)nstat->tx_fifo_errors);
+		 (unsigned long int)nstat->tx_aborted_errors,
+		 (unsigned long int)nstat->tx_carrier_errors,
+		 (unsigned long int)nstat->tx_fifo_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
-		 (long unsigned int)nstat->tx_heartbeat_errors,
-		 (long unsigned int)nstat->tx_window_errors);
+		 (unsigned long int)nstat->tx_heartbeat_errors,
+		 (unsigned long int)nstat->tx_window_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
-		 (long unsigned int)nstat->rx_compressed,
-		 (long unsigned int)nstat->tx_compressed);
+		 (unsigned long int)nstat->rx_compressed,
+		 (unsigned long int)nstat->tx_compressed);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.rx_packets,
-		 (long unsigned int)vsi->net_stats_offsets.rx_bytes,
-		 (long unsigned int)vsi->net_stats_offsets.rx_errors,
-		 (long unsigned int)vsi->net_stats_offsets.rx_dropped);
+		 (unsigned long int)vsi->net_stats_offsets.rx_packets,
+		 (unsigned long int)vsi->net_stats_offsets.rx_bytes,
+		 (unsigned long int)vsi->net_stats_offsets.rx_errors,
+		 (unsigned long int)vsi->net_stats_offsets.rx_dropped);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.tx_packets,
-		 (long unsigned int)vsi->net_stats_offsets.tx_bytes,
-		 (long unsigned int)vsi->net_stats_offsets.tx_errors,
-		 (long unsigned int)vsi->net_stats_offsets.tx_dropped);
+		 (unsigned long int)vsi->net_stats_offsets.tx_packets,
+		 (unsigned long int)vsi->net_stats_offsets.tx_bytes,
+		 (unsigned long int)vsi->net_stats_offsets.tx_errors,
+		 (unsigned long int)vsi->net_stats_offsets.tx_dropped);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: multicast = %lu, collisions = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.multicast,
-		 (long unsigned int)vsi->net_stats_offsets.collisions);
+		 (unsigned long int)vsi->net_stats_offsets.multicast,
+		 (unsigned long int)vsi->net_stats_offsets.collisions);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.rx_length_errors,
-		 (long unsigned int)vsi->net_stats_offsets.rx_over_errors,
-		 (long unsigned int)vsi->net_stats_offsets.rx_crc_errors);
+		 (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
+		 (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
+		 (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.rx_frame_errors,
-		 (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors,
-		 (long unsigned int)vsi->net_stats_offsets.rx_missed_errors);
+		 (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
+		 (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
+		 (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors,
-		 (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors,
-		 (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors);
+		 (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
+		 (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
+		 (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors,
-		 (long unsigned int)vsi->net_stats_offsets.tx_window_errors);
+		 (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
+		 (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.rx_compressed,
-		 (long unsigned int)vsi->net_stats_offsets.tx_compressed);
+		 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
+		 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
 	dev_info(&pf->pdev->dev,
 		 "    tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
 		 vsi->tx_restart, vsi->tx_busy,
@@ -487,6 +487,7 @@
 	rcu_read_lock();
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
 		struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+
 		if (!rx_ring)
 			continue;
 
@@ -527,7 +528,7 @@
 		dev_info(&pf->pdev->dev,
 			 "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
 			 i, rx_ring->size,
-			 (long unsigned int)rx_ring->dma);
+			 (unsigned long int)rx_ring->dma);
 		dev_info(&pf->pdev->dev,
 			 "    rx_rings[%i]: vsi = %p, q_vector = %p\n",
 			 i, rx_ring->vsi,
@@ -535,6 +536,7 @@
 	}
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
 		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+
 		if (!tx_ring)
 			continue;
 
@@ -573,7 +575,7 @@
 		dev_info(&pf->pdev->dev,
 			 "    tx_rings[%i]: size = %i, dma = 0x%08lx\n",
 			 i, tx_ring->size,
-			 (long unsigned int)tx_ring->dma);
+			 (unsigned long int)tx_ring->dma);
 		dev_info(&pf->pdev->dev,
 			 "    tx_rings[%i]: vsi = %p, q_vector = %p\n",
 			 i, tx_ring->vsi,
@@ -743,6 +745,7 @@
 	ring = &(hw->aq.asq);
 	for (i = 0; i < ring->count; i++) {
 		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
 		dev_info(&pf->pdev->dev,
 			 "   at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
 			 i, d->flags, d->opcode, d->datalen, d->retval,
@@ -755,6 +758,7 @@
 	ring = &(hw->aq.arq);
 	for (i = 0; i < ring->count; i++) {
 		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
 		dev_info(&pf->pdev->dev,
 			 "   ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
 			 i, d->flags, d->opcode, d->datalen, d->retval,
@@ -1038,7 +1042,13 @@
 			dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
 
 	} else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
-		sscanf(&cmd_buf[7], "%i", &vsi_seid);
+		cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
+		if (cnt != 1) {
+			dev_info(&pf->pdev->dev,
+				 "del vsi: bad command string, cnt=%d\n",
+				 cnt);
+			goto command_write_done;
+		}
 		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
 		if (!vsi) {
 			dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
@@ -1146,7 +1156,7 @@
 		}
 
 		f = i40e_add_filter(vsi, ma, vlan, false, false);
-		ret = i40e_sync_vsi_filters(vsi);
+		ret = i40e_sync_vsi_filters(vsi, true);
 		if (f && !ret)
 			dev_info(&pf->pdev->dev,
 				 "add macaddr: %pM vlan=%d added to VSI %d\n",
@@ -1183,7 +1193,7 @@
 		}
 
 		i40e_del_filter(vsi, ma, vlan, false, false);
-		ret = i40e_sync_vsi_filters(vsi);
+		ret = i40e_sync_vsi_filters(vsi, true);
 		if (!ret)
 			dev_info(&pf->pdev->dev,
 				 "del macaddr: %pM vlan=%d removed from VSI %d\n",
@@ -1488,6 +1498,7 @@
 	} else if (strncmp(cmd_buf, "read", 4) == 0) {
 		u32 address;
 		u32 value;
+
 		cnt = sscanf(&cmd_buf[4], "%i", &address);
 		if (cnt != 1) {
 			dev_info(&pf->pdev->dev, "read <reg>\n");
@@ -1495,9 +1506,9 @@
 		}
 
 		/* check the range on address */
-		if (address >= I40E_MAX_REGISTER) {
-			dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n",
-				 address);
+		if (address > (pf->ioremap_len - sizeof(u32))) {
+			dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
+				 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
 			goto command_write_done;
 		}
 
@@ -1507,6 +1518,7 @@
 
 	} else if (strncmp(cmd_buf, "write", 5) == 0) {
 		u32 address, value;
+
 		cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
 		if (cnt != 2) {
 			dev_info(&pf->pdev->dev, "write <reg> <value>\n");
@@ -1514,9 +1526,9 @@
 		}
 
 		/* check the range on address */
-		if (address >= I40E_MAX_REGISTER) {
-			dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n",
-				 address);
+		if (address > (pf->ioremap_len - sizeof(u32))) {
+			dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
+				 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
 			goto command_write_done;
 		}
 		wr32(&pf->hw, address, value);
@@ -1528,6 +1540,7 @@
 			cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
 			if (cnt == 0) {
 				int i;
+
 				for (i = 0; i < pf->num_alloc_vsi; i++)
 					i40e_vsi_reset_stats(pf->vsi[i]);
 				dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
@@ -1726,8 +1739,9 @@
 				   packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
 
 		for (i = 0; i < packet_len; i++) {
-			sscanf(&asc_packet[j], "%2hhx ",
-			       &raw_packet[i]);
+			cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]);
+			if (!cnt)
+				break;
 			j += 3;
 		}
 		dev_info(&pf->pdev->dev, "FD raw packet dump\n");
@@ -1755,6 +1769,7 @@
 	} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
 		if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
 			int ret;
+
 			ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
 			if (ret) {
 				dev_info(&pf->pdev->dev,
@@ -1779,6 +1794,7 @@
 #endif /* CONFIG_I40E_DCB */
 		} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
 			int ret;
+
 			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
 						pf->hw.mac.addr,
 						I40E_ETH_P_LLDP, 0,
@@ -1807,6 +1823,7 @@
 			u16 llen, rlen;
 			int ret;
 			u8 *buff;
+
 			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
 			if (!buff)
 				goto command_write_done;
@@ -1833,6 +1850,7 @@
 			u16 llen, rlen;
 			int ret;
 			u8 *buff;
+
 			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
 			if (!buff)
 				goto command_write_done;
@@ -1858,6 +1876,7 @@
 			buff = NULL;
 		} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
 			int ret;
+
 			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
 								true, NULL);
 			if (ret) {
@@ -1868,6 +1887,7 @@
 			}
 		} else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
 			int ret;
+
 			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
 								false, NULL);
 			if (ret) {
@@ -2105,6 +2125,7 @@
 		}
 	} else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
 		int mtu;
+
 		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
 			     &vsi_seid, &mtu);
 		if (cnt != 2) {
@@ -2220,7 +2241,6 @@
 create_failed:
 	dev_info(dev, "debugfs dir/file for %s failed\n", name);
 	debugfs_remove_recursive(pf->i40e_dbg_pf);
-	return;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index e972b5e..148f614 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -87,6 +87,7 @@
 	I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
 	I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
 	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+	I40E_VSI_STAT("tx_linearize", tx_linearize),
 };
 
 static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
@@ -229,10 +230,10 @@
 
 static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
 	"NPAR",
+	"LinkPolling",
 };
 
-#define I40E_PRIV_FLAGS_STR_LEN \
-	(sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN)
+#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
 
 /**
  * i40e_partition_setting_complaint - generic complaint for MFP restriction
@@ -425,6 +426,7 @@
 		ecmd->advertising = ADVERTISED_10000baseKR_Full;
 		break;
 	case I40E_DEV_ID_10G_BASE_T:
+	case I40E_DEV_ID_10G_BASE_T4:
 		ecmd->supported = SUPPORTED_10000baseT_Full |
 				  SUPPORTED_1000baseT_Full |
 				  SUPPORTED_100baseT_Full;
@@ -437,6 +439,7 @@
 			ecmd->advertising |= ADVERTISED_100baseT_Full;
 		break;
 	case I40E_DEV_ID_20G_KR2:
+	case I40E_DEV_ID_20G_KR2_A:
 		/* backplane 20G */
 		ecmd->supported = SUPPORTED_20000baseKR2_Full;
 		ecmd->advertising = ADVERTISED_20000baseKR2_Full;
@@ -664,6 +667,13 @@
 	    advertise & ADVERTISED_40000baseLR4_Full)
 		config.link_speed |= I40E_LINK_SPEED_40GB;
 
+	/* If speed didn't get set, set it to what it currently is.
+	 * This is needed because if advertise is 0 (as it is when autoneg
+	 * is disabled) then speed won't get set.
+	 */
+	if (!config.link_speed)
+		config.link_speed = abilities.link_speed;
+
 	if (change || (abilities.link_speed != config.link_speed)) {
 		/* copy over the rest of the abilities */
 		config.phy_type = abilities.phy_type;
@@ -680,7 +690,7 @@
 			/* Tell the OS link is going down, the link will go
 			 * back up when fw says it is ready asynchronously
 			 */
-			netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
+			i40e_print_link_message(vsi, false);
 			netif_carrier_off(netdev);
 			netif_tx_stop_all_queues(netdev);
 		}
@@ -824,7 +834,7 @@
 	/* Tell the OS link is going down, the link will go back up when fw
 	 * says it is ready asynchronously
 	 */
-	netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
+	i40e_print_link_message(vsi, false);
 	netif_carrier_off(netdev);
 	netif_tx_stop_all_queues(netdev);
 
@@ -1166,6 +1176,11 @@
 			/* clone ring and setup updated count */
 			tx_rings[i] = *vsi->tx_rings[i];
 			tx_rings[i].count = new_tx_count;
+			/* the desc and bi pointers will be reallocated in the
+			 * setup call
+			 */
+			tx_rings[i].desc = NULL;
+			tx_rings[i].rx_bi = NULL;
 			err = i40e_setup_tx_descriptors(&tx_rings[i]);
 			if (err) {
 				while (i) {
@@ -1196,6 +1211,11 @@
 			/* clone ring and setup updated count */
 			rx_rings[i] = *vsi->rx_rings[i];
 			rx_rings[i].count = new_rx_count;
+			/* the desc and bi pointers will be reallocated in the
+			 * setup call
+			 */
+			rx_rings[i].desc = NULL;
+			rx_rings[i].rx_bi = NULL;
 			err = i40e_setup_rx_descriptors(&rx_rings[i]);
 			if (err) {
 				while (i) {
@@ -1263,7 +1283,8 @@
 		if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
 			int len = I40E_PF_STATS_LEN(netdev);
 
-			if (pf->lan_veb != I40E_NO_VEB)
+			if ((pf->lan_veb != I40E_NO_VEB) &&
+			    (pf->flags & I40E_FLAG_VEB_STATS_ENABLED))
 				len += I40E_VEB_STATS_TOTAL;
 			return len;
 		} else {
@@ -1336,8 +1357,10 @@
 	if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
 		return;
 
-	if (pf->lan_veb != I40E_NO_VEB) {
+	if ((pf->lan_veb != I40E_NO_VEB) &&
+	    (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
 		struct i40e_veb *veb = pf->veb[pf->lan_veb];
+
 		for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
 			p = (char *)veb;
 			p += i40e_gstrings_veb_stats[j].stat_offset;
@@ -1409,7 +1432,8 @@
 		if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
 			return;
 
-		if (pf->lan_veb != I40E_NO_VEB) {
+		if ((pf->lan_veb != I40E_NO_VEB) &&
+		    (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
 			for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
 				snprintf(p, ETH_GSTRING_LEN, "veb.%s",
 					i40e_gstrings_veb_stats[i].stat_string);
@@ -1504,9 +1528,18 @@
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_pf *pf = np->vsi->back;
+	i40e_status status;
+	bool link_up = false;
 
 	netif_info(pf, hw, netdev, "link test\n");
-	if (i40e_get_link_status(&pf->hw))
+	status = i40e_get_link_status(&pf->hw, &link_up);
+	if (status) {
+		netif_err(pf, drv, netdev, "link query timed out, please retry test\n");
+		*data = 1;
+		return *data;
+	}
+
+	if (link_up)
 		*data = 0;
 	else
 		*data = 1;
@@ -1575,7 +1608,7 @@
 	int i;
 
 	for (i = 0; i < pf->num_alloc_vfs; i++)
-		if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE)
+		if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
 			return true;
 	return false;
 }
@@ -2604,10 +2637,31 @@
 
 	ret_flags |= pf->hw.func_caps.npar_enable ?
 		I40E_PRIV_FLAGS_NPAR_FLAG : 0;
+	ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
+		I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
 
 	return ret_flags;
 }
 
+/**
+ * i40e_set_priv_flags - set private flags
+ * @dev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+
+	if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
+		pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
+	else
+		pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED;
+
+	return 0;
+}
+
 static const struct ethtool_ops i40e_ethtool_ops = {
 	.get_settings		= i40e_get_settings,
 	.set_settings		= i40e_set_settings,
@@ -2644,6 +2698,7 @@
 	.set_channels		= i40e_set_channels,
 	.get_ts_info		= i40e_get_ts_info,
 	.get_priv_flags		= i40e_get_priv_flags,
+	.set_priv_flags		= i40e_set_priv_flags,
 };
 
 void i40e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 5ea75dd..eaedc1f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -272,10 +272,8 @@
 /**
  * i40e_fcoe_sw_init - sets up the HW for FCoE
  * @pf: pointer to PF
- *
- * Returns 0 if FCoE is supported otherwise the error code
  **/
-int i40e_init_pf_fcoe(struct i40e_pf *pf)
+void i40e_init_pf_fcoe(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->hw;
 	u32 val;
@@ -287,13 +285,13 @@
 
 	if (!pf->hw.func_caps.fcoe) {
 		dev_info(&pf->pdev->dev, "FCoE capability is disabled\n");
-		return 0;
+		return;
 	}
 
 	if (!pf->hw.func_caps.dcb) {
 		dev_warn(&pf->pdev->dev,
 			 "Hardware is not DCB capable not enabling FCoE.\n");
-		return 0;
+		return;
 	}
 
 	/* enable FCoE hash filter */
@@ -326,7 +324,6 @@
 	wr32(hw, I40E_GLFCOE_RCTL, val);
 
 	dev_info(&pf->pdev->dev, "FCoE is supported.\n");
-	return 0;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index fa371a2..79ae7be 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -431,9 +431,8 @@
 			pd_idx1 = max(pd_idx,
 				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
 			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
-			for (i = pd_idx1; i < pd_lmt1; i++) {
+			for (i = pd_idx1; i < pd_lmt1; i++)
 				i40e_remove_pd_bp(hw, info->hmc_info, i);
-			}
 			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
 			break;
 		case I40E_SD_TYPE_DIRECT:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 851c1a1..a484f22 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 9
+#define DRV_VERSION_BUILD 21
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -75,10 +75,13 @@
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
 	/* required last entry */
 	{0, }
 };
@@ -213,10 +216,10 @@
 			ret = i;
 			pile->search_hint = i + j;
 			break;
-		} else {
-			/* not enough, so skip over it and continue looking */
-			i += j;
 		}
+
+		/* not enough, so skip over it and continue looking */
+		i += j;
 	}
 
 	return ret;
@@ -299,25 +302,69 @@
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
+	struct i40e_ring *tx_ring = NULL;
+	unsigned int i, hung_queue = 0;
+	u32 head, val;
 
 	pf->tx_timeout_count++;
 
+	/* find the stopped queue the same way the stack does */
+	for (i = 0; i < netdev->num_tx_queues; i++) {
+		struct netdev_queue *q;
+		unsigned long trans_start;
+
+		q = netdev_get_tx_queue(netdev, i);
+		trans_start = q->trans_start ? : netdev->trans_start;
+		if (netif_xmit_stopped(q) &&
+		    time_after(jiffies,
+			       (trans_start + netdev->watchdog_timeo))) {
+			hung_queue = i;
+			break;
+		}
+	}
+
+	if (i == netdev->num_tx_queues) {
+		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
+	} else {
+		/* now that we have an index, find the tx_ring struct */
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+				if (hung_queue ==
+				    vsi->tx_rings[i]->queue_index) {
+					tx_ring = vsi->tx_rings[i];
+					break;
+				}
+			}
+		}
+	}
+
 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
-		pf->tx_timeout_recovery_level = 1;
+		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
+	else if (time_before(jiffies,
+		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
+		return;   /* don't do any new action before the next timeout */
+
+	if (tx_ring) {
+		head = i40e_get_head(tx_ring);
+		/* Read interrupt register */
+		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+			val = rd32(&pf->hw,
+			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
+						tx_ring->vsi->base_vector - 1));
+		else
+			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
+
+		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+			    vsi->seid, hung_queue, tx_ring->next_to_clean,
+			    head, tx_ring->next_to_use,
+			    readl(tx_ring->tail), val);
+	}
+
 	pf->tx_timeout_last_recovery = jiffies;
-	netdev_info(netdev, "tx_timeout recovery level %d\n",
-		    pf->tx_timeout_recovery_level);
+	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
+		    pf->tx_timeout_recovery_level, hung_queue);
 
 	switch (pf->tx_timeout_recovery_level) {
-	case 0:
-		/* disable and re-enable queues for the VSI */
-		if (in_interrupt()) {
-			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
-			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
-		} else {
-			i40e_vsi_reinit_locked(vsi);
-		}
-		break;
 	case 1:
 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
 		break;
@@ -329,10 +376,9 @@
 		break;
 	default:
 		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
-		set_bit(__I40E_DOWN_REQUESTED, &pf->state);
-		set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
 		break;
 	}
+
 	i40e_service_event_schedule(pf);
 	pf->tx_timeout_recovery_level++;
 }
@@ -431,6 +477,7 @@
 	stats->tx_errors	= vsi_stats->tx_errors;
 	stats->tx_dropped	= vsi_stats->tx_dropped;
 	stats->rx_errors	= vsi_stats->rx_errors;
+	stats->rx_dropped	= vsi_stats->rx_dropped;
 	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
 	stats->rx_length_errors	= vsi_stats->rx_length_errors;
 
@@ -456,11 +503,11 @@
 	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
 	if (vsi->rx_rings && vsi->rx_rings[0]) {
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			memset(&vsi->rx_rings[i]->stats, 0 ,
+			memset(&vsi->rx_rings[i]->stats, 0,
 			       sizeof(vsi->rx_rings[i]->stats));
-			memset(&vsi->rx_rings[i]->rx_stats, 0 ,
+			memset(&vsi->rx_rings[i]->rx_stats, 0,
 			       sizeof(vsi->rx_rings[i]->rx_stats));
-			memset(&vsi->tx_rings[i]->stats, 0 ,
+			memset(&vsi->tx_rings[i]->stats, 0,
 			       sizeof(vsi->tx_rings[i]->stats));
 			memset(&vsi->tx_rings[i]->tx_stats, 0,
 			       sizeof(vsi->tx_rings[i]->tx_stats));
@@ -754,7 +801,6 @@
 	struct i40e_hw_port_stats *nsd = &pf->stats;
 	struct i40e_hw *hw = &pf->hw;
 	u64 xoff = 0;
-	u16 i, v;
 
 	if ((hw->fc.current_mode != I40E_FC_FULL) &&
 	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
@@ -769,18 +815,6 @@
 	if (!(nsd->link_xoff_rx - xoff))
 		return;
 
-	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
-	for (v = 0; v < pf->num_alloc_vsi; v++) {
-		struct i40e_vsi *vsi = pf->vsi[v];
-
-		if (!vsi || !vsi->tx_rings[0])
-			continue;
-
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			struct i40e_ring *ring = vsi->tx_rings[i];
-			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
-		}
-	}
 }
 
 /**
@@ -796,7 +830,7 @@
 	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
 	struct i40e_dcbx_config *dcb_cfg;
 	struct i40e_hw *hw = &pf->hw;
-	u16 i, v;
+	u16 i;
 	u8 tc;
 
 	dcb_cfg = &hw->local_dcbx_config;
@@ -809,6 +843,7 @@
 
 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
 		u64 prio_xoff = nsd->priority_xoff_rx[i];
+
 		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
 				   pf->stat_offsets_loaded,
 				   &osd->priority_xoff_rx[i],
@@ -821,23 +856,6 @@
 		tc = dcb_cfg->etscfg.prioritytable[i];
 		xoff[tc] = true;
 	}
-
-	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
-	for (v = 0; v < pf->num_alloc_vsi; v++) {
-		struct i40e_vsi *vsi = pf->vsi[v];
-
-		if (!vsi || !vsi->tx_rings[0])
-			continue;
-
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			struct i40e_ring *ring = vsi->tx_rings[i];
-
-			tc = ring->dcb_tc;
-			if (xoff[tc])
-				clear_bit(__I40E_HANG_CHECK_ARMED,
-					  &ring->state);
-		}
-	}
 }
 
 /**
@@ -862,6 +880,7 @@
 	u32 rx_page, rx_buf;
 	u64 bytes, packets;
 	unsigned int start;
+	u64 tx_linearize;
 	u64 rx_p, rx_b;
 	u64 tx_p, tx_b;
 	u16 q;
@@ -880,7 +899,7 @@
 	 */
 	rx_b = rx_p = 0;
 	tx_b = tx_p = 0;
-	tx_restart = tx_busy = 0;
+	tx_restart = tx_busy = tx_linearize = 0;
 	rx_page = 0;
 	rx_buf = 0;
 	rcu_read_lock();
@@ -897,6 +916,7 @@
 		tx_p += packets;
 		tx_restart += p->tx_stats.restart_queue;
 		tx_busy += p->tx_stats.tx_busy;
+		tx_linearize += p->tx_stats.tx_linearize;
 
 		/* Rx queue is part of the same block as Tx queue */
 		p = &p[1];
@@ -913,6 +933,7 @@
 	rcu_read_unlock();
 	vsi->tx_restart = tx_restart;
 	vsi->tx_busy = tx_busy;
+	vsi->tx_linearize = tx_linearize;
 	vsi->rx_page_failed = rx_page;
 	vsi->rx_buf_failed = rx_buf;
 
@@ -1256,7 +1277,7 @@
 	 * so we have to go through all the list in order to make sure
 	 */
 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
-		if (f->vlan >= 0)
+		if (f->vlan >= 0 || vsi->info.pvid)
 			return true;
 	}
 
@@ -1419,6 +1440,7 @@
 	} else {
 		/* make sure we don't remove a filter in use by VF or netdev */
 		int min_f = 0;
+
 		min_f += (f->is_vf ? 1 : 0);
 		min_f += (f->is_netdev ? 1 : 0);
 
@@ -1477,6 +1499,7 @@
 
 	if (vsi->type == I40E_VSI_MAIN) {
 		i40e_status ret;
+
 		ret = i40e_aq_mac_address_write(&vsi->back->hw,
 						I40E_AQC_WRITE_TYPE_LAA_WOL,
 						addr->sa_data, NULL);
@@ -1514,7 +1537,7 @@
 			f->is_laa = true;
 	}
 
-	i40e_sync_vsi_filters(vsi);
+	i40e_sync_vsi_filters(vsi, false);
 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 
 	return 0;
@@ -1709,36 +1732,27 @@
 
 	/* remove filter if not in netdev list */
 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
-		bool found = false;
 
 		if (!f->is_netdev)
 			continue;
 
-		if (is_multicast_ether_addr(f->macaddr)) {
-			netdev_for_each_mc_addr(mca, netdev) {
-				if (ether_addr_equal(mca->addr, f->macaddr)) {
-					found = true;
-					break;
-				}
-			}
-		} else {
-			netdev_for_each_uc_addr(uca, netdev) {
-				if (ether_addr_equal(uca->addr, f->macaddr)) {
-					found = true;
-					break;
-				}
-			}
+		netdev_for_each_mc_addr(mca, netdev)
+			if (ether_addr_equal(mca->addr, f->macaddr))
+				goto bottom_of_search_loop;
 
-			for_each_dev_addr(netdev, ha) {
-				if (ether_addr_equal(ha->addr, f->macaddr)) {
-					found = true;
-					break;
-				}
-			}
-		}
-		if (!found)
-			i40e_del_filter(
-			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+		netdev_for_each_uc_addr(uca, netdev)
+			if (ether_addr_equal(uca->addr, f->macaddr))
+				goto bottom_of_search_loop;
+
+		for_each_dev_addr(netdev, ha)
+			if (ether_addr_equal(ha->addr, f->macaddr))
+				goto bottom_of_search_loop;
+
+		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
+		i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+
+bottom_of_search_loop:
+		continue;
 	}
 
 	/* check for other flag changes */
@@ -1751,12 +1765,13 @@
 /**
  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
+ * @grab_rtnl: whether RTNL needs to be grabbed
  *
  * Push any outstanding VSI filter changes through the AdminQ.
  *
  * Returns 0 or error value
  **/
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
 {
 	struct i40e_mac_filter *f, *ftmp;
 	bool promisc_forced_on = false;
@@ -1920,6 +1935,7 @@
 	/* check for changes in promiscuous modes */
 	if (changed_flags & IFF_ALLMULTI) {
 		bool cur_multipromisc;
+
 		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
 		ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
 							    vsi->seid,
@@ -1934,6 +1950,7 @@
 	}
 	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
 		bool cur_promisc;
+
 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
 			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
 					&vsi->state));
@@ -1945,7 +1962,11 @@
 			 */
 			if (pf->cur_promisc != cur_promisc) {
 				pf->cur_promisc = cur_promisc;
-				i40e_do_reset_safe(pf,
+				if (grab_rtnl)
+					i40e_do_reset_safe(pf,
+						BIT(__I40E_PF_RESET_REQUESTED));
+				else
+					i40e_do_reset(pf,
 						BIT(__I40E_PF_RESET_REQUESTED));
 			}
 		} else {
@@ -1996,7 +2017,7 @@
 	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v] &&
 		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
-			i40e_sync_vsi_filters(pf->vsi[v]);
+			i40e_sync_vsi_filters(pf->vsi[v], true);
 	}
 }
 
@@ -2203,7 +2224,7 @@
 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
 		return 0;
 
-	return i40e_sync_vsi_filters(vsi);
+	return i40e_sync_vsi_filters(vsi, false);
 }
 
 /**
@@ -2275,7 +2296,7 @@
 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
 		return 0;
 
-	return i40e_sync_vsi_filters(vsi);
+	return i40e_sync_vsi_filters(vsi, false);
 }
 
 /**
@@ -2609,8 +2630,6 @@
 	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
 	i40e_flush(hw);
 
-	clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
-
 	/* cache tail off for easier writes later */
 	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
 
@@ -2672,7 +2691,8 @@
 		rx_ctx.lrxqthresh = 2;
 	rx_ctx.crcstrip = 1;
 	rx_ctx.l2tsel = 1;
-	rx_ctx.showiv = 1;
+	/* this controls whether VLAN is stripped from inner headers */
+	rx_ctx.showiv = 0;
 #ifdef I40E_FCOE
 	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
 #endif
@@ -3045,24 +3065,6 @@
 }
 
 /**
- * i40e_irq_dynamic_enable - Enable default interrupt generation settings
- * @vsi: pointer to a vsi
- * @vector: enable a particular Hw Interrupt vector
- **/
-void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
-{
-	struct i40e_pf *pf = vsi->back;
-	struct i40e_hw *hw = &pf->hw;
-	u32 val;
-
-	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
-	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
-	/* skip the flush */
-}
-
-/**
  * i40e_irq_dynamic_disable - Disable default interrupt generation settings
  * @vsi: pointer to a vsi
  * @vector: disable a particular Hw Interrupt vector
@@ -3135,8 +3137,7 @@
 				  q_vector);
 		if (err) {
 			dev_info(&pf->pdev->dev,
-				 "%s: request_irq failed, error: %d\n",
-				 __func__, err);
+				 "MSIX request_irq failed, error: %d\n", err);
 			goto free_queue_irqs;
 		}
 		/* assign the mask for this irq */
@@ -3201,8 +3202,7 @@
 	int i;
 
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
-		for (i = vsi->base_vector;
-		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
+		for (i = 0; i < vsi->num_q_vectors; i++)
 			i40e_irq_dynamic_enable(vsi, i);
 	} else {
 		i40e_irq_dynamic_enable_icr0(pf);
@@ -3264,6 +3264,7 @@
 
 		/* temporarily disable queue cause for NAPI processing */
 		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+
 		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
 		wr32(hw, I40E_QINT_RQCTL(0), qval);
 
@@ -3433,10 +3434,9 @@
 	i += tx_ring->count;
 	tx_ring->next_to_clean = i;
 
-	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
-		i40e_irq_dynamic_enable(vsi,
-				tx_ring->q_vector->v_idx + vsi->base_vector);
-	}
+	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
+		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
+
 	return budget > 0;
 }
 
@@ -3662,9 +3662,8 @@
 		ret = i40e_pf_txq_wait(pf, pf_q, enable);
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "%s: VSI seid %d Tx ring %d %sable timeout\n",
-				 __func__, vsi->seid, pf_q,
-				 (enable ? "en" : "dis"));
+				 "VSI seid %d Tx ring %d %sable timeout\n",
+				 vsi->seid, pf_q, (enable ? "en" : "dis"));
 			break;
 		}
 	}
@@ -3740,9 +3739,8 @@
 		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "%s: VSI seid %d Rx ring %d %sable timeout\n",
-				 __func__, vsi->seid, pf_q,
-				 (enable ? "en" : "dis"));
+				 "VSI seid %d Rx ring %d %sable timeout\n",
+				 vsi->seid, pf_q, (enable ? "en" : "dis"));
 			break;
 		}
 	}
@@ -4037,17 +4035,15 @@
 	if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
 	    vsi->type == I40E_VSI_FCOE) {
 		dev_dbg(&vsi->back->pdev->dev,
-			"%s: VSI seid %d skipping FCoE VSI disable\n",
-			 __func__, vsi->seid);
+			 "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
 		return;
 	}
 
 	set_bit(__I40E_NEEDS_RESTART, &vsi->state);
-	if (vsi->netdev && netif_running(vsi->netdev)) {
+	if (vsi->netdev && netif_running(vsi->netdev))
 		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
-	} else {
+	else
 		i40e_vsi_close(vsi);
-	}
 }
 
 /**
@@ -4112,8 +4108,8 @@
 		ret = i40e_pf_txq_wait(pf, pf_q, false);
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "%s: VSI seid %d Tx ring %d disable timeout\n",
-				 __func__, vsi->seid, pf_q);
+				 "VSI seid %d Tx ring %d disable timeout\n",
+				 vsi->seid, pf_q);
 			return ret;
 		}
 	}
@@ -4145,6 +4141,108 @@
 }
 
 #endif
+
+/**
+ * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
+ * @q_idx: TX queue number
+ * @vsi: Pointer to VSI struct
+ *
+ * This function checks the specified queue for the given VSI. It detects a
+ * hung condition and sets the hung bit, since detection is a two-step process.
+ * If napi_poll runs before the next service task run, it resets the 'hung' bit
+ * for the respective q_vector; otherwise the hung condition remains unchanged
+ * and, during the subsequent run, this function issues a SW interrupt to
+ * recover from the hung condition.
+ **/
+static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
+{
+	struct i40e_ring *tx_ring = NULL;
+	struct i40e_pf	*pf;
+	u32 head, val, tx_pending;
+	int i;
+
+	pf = vsi->back;
+
+	/* now that we have an index, find the tx_ring struct */
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+			if (q_idx == vsi->tx_rings[i]->queue_index) {
+				tx_ring = vsi->tx_rings[i];
+				break;
+			}
+		}
+	}
+
+	if (!tx_ring)
+		return;
+
+	/* Read interrupt register */
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+		val = rd32(&pf->hw,
+			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
+					       tx_ring->vsi->base_vector - 1));
+	else
+		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
+
+	head = i40e_get_head(tx_ring);
+
+	tx_pending = i40e_get_tx_pending(tx_ring);
+
+	/* If interrupts are disabled and TX pending is non-zero,
+	 * trigger the SW interrupt (don't wait). Worst case there
+	 * will be one extra interrupt, which may result in no queues
+	 * being cleaned because they have already been cleaned.
+	 */
+	if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+		i40e_force_wb(vsi, tx_ring->q_vector);
+}
+
+/**
+ * i40e_detect_recover_hung - Function to detect and recover hung_queues
+ * @pf:  pointer to PF struct
+ *
+ * The LAN VSI has a netdev, and the netdev has TX queues. This function checks
+ * each of those TX queues and, if a queue is hung, triggers recovery by
+ * issuing a SW interrupt.
+ **/
+static void i40e_detect_recover_hung(struct i40e_pf *pf)
+{
+	struct net_device *netdev;
+	struct i40e_vsi *vsi;
+	int i;
+
+	/* Only for LAN VSI */
+	vsi = pf->vsi[pf->lan_vsi];
+
+	if (!vsi)
+		return;
+
+	/* Make sure VSI state is not DOWN/RECOVERY_PENDING */
+	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+		return;
+
+	/* Make sure type is MAIN VSI */
+	if (vsi->type != I40E_VSI_MAIN)
+		return;
+
+	netdev = vsi->netdev;
+	if (!netdev)
+		return;
+
+	/* Bail out if netif_carrier is not OK */
+	if (!netif_carrier_ok(netdev))
+		return;
+
+	/* Go thru' TX queues for netdev */
+	for (i = 0; i < netdev->num_tx_queues; i++) {
+		struct netdev_queue *q;
+
+		q = netdev_get_tx_queue(netdev, i);
+		if (q)
+			i40e_detect_recover_hung_queue(i, vsi);
+	}
+}
+
 /**
  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
  * @pf: pointer to PF
@@ -4744,11 +4842,14 @@
  * i40e_print_link_message - print link up or down
  * @vsi: the VSI for which link needs a message
  */
-static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 {
 	char speed[SPEED_SIZE] = "Unknown";
 	char fc[FC_SIZE] = "RX/TX";
 
+	if (vsi->current_isup == isup)
+		return;
+	vsi->current_isup = isup;
 	if (!isup) {
 		netdev_info(vsi->netdev, "NIC Link is Down\n");
 		return;
@@ -5217,15 +5318,13 @@
 			 "VSI reinit requested\n");
 		for (v = 0; v < pf->num_alloc_vsi; v++) {
 			struct i40e_vsi *vsi = pf->vsi[v];
+
 			if (vsi != NULL &&
 			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
 				i40e_vsi_reinit_locked(pf->vsi[v]);
 				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
 			}
 		}
-
-		/* no further action needed, so return now */
-		return;
 	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
 		int v;
 
@@ -5233,6 +5332,7 @@
 		dev_info(&pf->pdev->dev, "VSI down requested\n");
 		for (v = 0; v < pf->num_alloc_vsi; v++) {
 			struct i40e_vsi *vsi = pf->vsi[v];
+
 			if (vsi != NULL &&
 			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
 				set_bit(__I40E_DOWN, &vsi->state);
@@ -5240,13 +5340,9 @@
 				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
 			}
 		}
-
-		/* no further action needed, so return now */
-		return;
 	} else {
 		dev_info(&pf->pdev->dev,
 			 "bad reset request 0x%08x\n", reset_flags);
-		return;
 	}
 }
 
@@ -5302,8 +5398,7 @@
 		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
 	}
 
-	dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
-		need_reconfig);
+	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
 	return need_reconfig;
 }
 
@@ -5330,16 +5425,14 @@
 	/* Ignore if event is not for Nearest Bridge */
 	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
 		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
-	dev_dbg(&pf->pdev->dev,
-		"%s: LLDP event mib bridge type 0x%x\n", __func__, type);
+	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
 	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
 		return ret;
 
 	/* Check MIB Type and return if event for Remote MIB update */
 	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
 	dev_dbg(&pf->pdev->dev,
-		"%s: LLDP event mib type %s\n", __func__,
-		type ? "remote" : "local");
+		"LLDP event mib type %s\n", type ? "remote" : "local");
 	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
 		/* Update the remote cached instance and return */
 		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
@@ -5524,7 +5617,9 @@
  **/
 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
 {
+	struct i40e_fdir_filter *filter;
 	u32 fcnt_prog, fcnt_avail;
+	struct hlist_node *node;
 
 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
 		return;
@@ -5553,6 +5648,18 @@
 				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
 		}
 	}
+
+	/* if hw had a problem adding a filter, delete it */
+	if (pf->fd_inv > 0) {
+		hlist_for_each_entry_safe(filter, node,
+					  &pf->fdir_filter_list, fdir_node) {
+			if (filter->fd_id == pf->fd_inv) {
+				hlist_del(&filter->fdir_node);
+				kfree(filter);
+				pf->fdir_pf_active_filters--;
+			}
+		}
+	}
 }
 
 #define I40E_MIN_FD_FLUSH_INTERVAL 10
@@ -5572,49 +5679,51 @@
 	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
 		return;
 
-	if (time_after(jiffies, pf->fd_flush_timestamp +
-				(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
-		/* If the flush is happening too quick and we have mostly
-		 * SB rules we should not re-enable ATR for some time.
-		 */
-		min_flush_time = pf->fd_flush_timestamp
-				+ (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
-		fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
+	if (!time_after(jiffies, pf->fd_flush_timestamp +
+				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
+		return;
 
-		if (!(time_after(jiffies, min_flush_time)) &&
-		    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
-			if (I40E_DEBUG_FD & pf->hw.debug_mask)
-				dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
-			disable_atr = true;
-		}
+	/* If the flush is happening too quickly and we have mostly SB rules,
+	 * we should not re-enable ATR for some time.
+	 */
+	min_flush_time = pf->fd_flush_timestamp +
+			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
+	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
 
-		pf->fd_flush_timestamp = jiffies;
-		pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-		/* flush all filters */
-		wr32(&pf->hw, I40E_PFQF_CTL_1,
-		     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
-		i40e_flush(&pf->hw);
-		pf->fd_flush_cnt++;
-		pf->fd_add_err = 0;
-		do {
-			/* Check FD flush status every 5-6msec */
-			usleep_range(5000, 6000);
-			reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
-			if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
-				break;
-		} while (flush_wait_retry--);
-		if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
-			dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
-		} else {
-			/* replay sideband filters */
-			i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
-			if (!disable_atr)
-				pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-			clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
-			if (I40E_DEBUG_FD & pf->hw.debug_mask)
-				dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
-		}
+	if (!(time_after(jiffies, min_flush_time)) &&
+	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+		if (I40E_DEBUG_FD & pf->hw.debug_mask)
+			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+		disable_atr = true;
 	}
+
+	pf->fd_flush_timestamp = jiffies;
+	pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+	/* flush all filters */
+	wr32(&pf->hw, I40E_PFQF_CTL_1,
+	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+	i40e_flush(&pf->hw);
+	pf->fd_flush_cnt++;
+	pf->fd_add_err = 0;
+	do {
+		/* Check FD flush status every 5-6msec */
+		usleep_range(5000, 6000);
+		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
+		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
+			break;
+	} while (flush_wait_retry--);
+	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
+		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
+	} else {
+		/* replay sideband filters */
+		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+		if (!disable_atr)
+			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+		if (I40E_DEBUG_FD & pf->hw.debug_mask)
+			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+	}
+
 }
 
 /**
@@ -5722,15 +5831,23 @@
  **/
 static void i40e_link_event(struct i40e_pf *pf)
 {
-	bool new_link, old_link;
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 	u8 new_link_speed, old_link_speed;
+	i40e_status status;
+	bool new_link, old_link;
 
 	/* set this to force the get_link_status call to refresh state */
 	pf->hw.phy.get_link_info = true;
 
 	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
-	new_link = i40e_get_link_status(&pf->hw);
+
+	status = i40e_get_link_status(&pf->hw, &new_link);
+	if (status) {
+		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
+			status);
+		return;
+	}
+
 	old_link_speed = pf->hw.phy.link_info_old.link_speed;
 	new_link_speed = pf->hw.phy.link_info.link_speed;
 
@@ -5759,68 +5876,6 @@
 }
 
 /**
- * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
- * @pf: board private structure
- *
- * Set the per-queue flags to request a check for stuck queues in the irq
- * clean functions, then force interrupts to be sure the irq clean is called.
- **/
-static void i40e_check_hang_subtask(struct i40e_pf *pf)
-{
-	int i, v;
-
-	/* If we're down or resetting, just bail */
-	if (test_bit(__I40E_DOWN, &pf->state) ||
-	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
-		return;
-
-	/* for each VSI/netdev
-	 *     for each Tx queue
-	 *         set the check flag
-	 *     for each q_vector
-	 *         force an interrupt
-	 */
-	for (v = 0; v < pf->num_alloc_vsi; v++) {
-		struct i40e_vsi *vsi = pf->vsi[v];
-		int armed = 0;
-
-		if (!pf->vsi[v] ||
-		    test_bit(__I40E_DOWN, &vsi->state) ||
-		    (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
-			continue;
-
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			set_check_for_tx_hang(vsi->tx_rings[i]);
-			if (test_bit(__I40E_HANG_CHECK_ARMED,
-				     &vsi->tx_rings[i]->state))
-				armed++;
-		}
-
-		if (armed) {
-			if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
-				wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
-				     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
-				      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
-				      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
-				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
-				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
-			} else {
-				u16 vec = vsi->base_vector - 1;
-				u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
-				      I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
-				      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
-				      I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
-				      I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
-				for (i = 0; i < vsi->num_q_vectors; i++, vec++)
-					wr32(&vsi->back->hw,
-					     I40E_PFINT_DYN_CTLN(vec), val);
-			}
-			i40e_flush(&vsi->back->hw);
-		}
-	}
-}
-
-/**
  * i40e_watchdog_subtask - periodic checks not using event driven response
  * @pf: board private structure
  **/
@@ -5839,8 +5894,8 @@
 		return;
 	pf->service_timer_previous = jiffies;
 
-	i40e_check_hang_subtask(pf);
-	i40e_link_event(pf);
+	if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
+		i40e_link_event(pf);
 
 	/* Update the stats for active netdevs so the network stack
 	 * can look at updated numbers whenever it cares to
@@ -5849,10 +5904,12 @@
 		if (pf->vsi[i] && pf->vsi[i]->netdev)
 			i40e_update_stats(pf->vsi[i]);
 
-	/* Update the stats for the active switching components */
-	for (i = 0; i < I40E_MAX_VEB; i++)
-		if (pf->veb[i])
-			i40e_update_veb_stats(pf->veb[i]);
+	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
+		/* Update the stats for the active switching components */
+		for (i = 0; i < I40E_MAX_VEB; i++)
+			if (pf->veb[i])
+				i40e_update_veb_stats(pf->veb[i]);
+	}
 
 	i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
 }
@@ -6231,6 +6288,7 @@
 
 		if (pf->vsi[v]->veb_idx == veb->idx) {
 			struct i40e_vsi *vsi = pf->vsi[v];
+
 			vsi->uplink_seid = veb->seid;
 			ret = i40e_add_vsi(vsi);
 			if (ret) {
@@ -6513,9 +6571,7 @@
 	}
 #endif /* CONFIG_I40E_DCB */
 #ifdef I40E_FCOE
-	ret = i40e_init_pf_fcoe(pf);
-	if (ret)
-		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
+	i40e_init_pf_fcoe(pf);
 
 #endif
 	/* do basic switch setup */
@@ -6807,6 +6863,7 @@
 		return;
 	}
 
+	i40e_detect_recover_hung(pf);
 	i40e_reset_subtask(pf);
 	i40e_handle_mdd_event(pf);
 	i40e_vc_process_vflr_event(pf);
@@ -7565,7 +7622,7 @@
 			 "Cannot set RSS key, err %s aq_err %s\n",
 			 i40e_stat_str(&pf->hw, ret),
 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-		return ret;
+		goto config_rss_aq_out;
 	}
 
 	if (vsi->type == I40E_VSI_MAIN)
@@ -7579,6 +7636,8 @@
 			 i40e_stat_str(&pf->hw, ret),
 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
+config_rss_aq_out:
+	kfree(rss_lut);
 	return ret;
 }
 
@@ -7853,6 +7912,7 @@
 	/* Set default capability flags */
 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
 		    I40E_FLAG_MSI_ENABLED     |
+		    I40E_FLAG_LINK_POLLING_ENABLED |
 		    I40E_FLAG_MSIX_ENABLED;
 
 	if (iommu_present(&pci_bus_type))
@@ -7913,9 +7973,7 @@
 	}
 
 #ifdef I40E_FCOE
-	err = i40e_init_pf_fcoe(pf);
-	if (err)
-		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
+	i40e_init_pf_fcoe(pf);
 
 #endif /* I40E_FCOE */
 #ifdef CONFIG_PCI_IOV
@@ -7939,6 +7997,9 @@
 	pf->lan_veb = I40E_NO_VEB;
 	pf->lan_vsi = I40E_NO_VSI;
 
+	/* By default FW has this off for performance reasons */
+	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+
 	/* set up queue assignment tracking */
 	size = sizeof(struct i40e_lump_tracking)
 		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
@@ -8118,9 +8179,6 @@
 		pf->vxlan_ports[idx] = 0;
 		pf->pending_vxlan_bitmap |= BIT_ULL(idx);
 		pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
-
-		dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
-			 ntohs(port));
 	} else {
 		netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
 			    ntohs(port));
@@ -8770,7 +8828,7 @@
 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
 		i40e_del_filter(vsi, f->macaddr, f->vlan,
 				f->is_vf, f->is_netdev);
-	i40e_sync_vsi_filters(vsi);
+	i40e_sync_vsi_filters(vsi, false);
 
 	i40e_vsi_delete(vsi);
 	i40e_vsi_free_q_vectors(vsi);
@@ -8995,8 +9053,7 @@
 		if (veb) {
 			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
 				dev_info(&vsi->back->pdev->dev,
-					 "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
-					 __func__);
+					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
 				return NULL;
 			}
 			/* We come up by default in VEPA mode if SRIOV is not
@@ -9646,6 +9703,7 @@
 	} else {
 		/* force a reset of TC and queue layout configurations */
 		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+
 		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
 		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
 		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
@@ -9856,6 +9914,9 @@
 	}
 	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
 		buf += sprintf(buf, "DCB ");
+#if IS_ENABLED(CONFIG_VXLAN)
+	buf += sprintf(buf, "VxLAN ");
+#endif
 	if (pf->flags & I40E_FLAG_PTP)
 		buf += sprintf(buf, "PTP ");
 #ifdef I40E_FCOE
@@ -9882,10 +9943,10 @@
 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct i40e_aq_get_phy_abilities_resp abilities;
-	unsigned long ioremap_len;
 	struct i40e_pf *pf;
 	struct i40e_hw *hw;
 	static u16 pfs_found;
+	u16 wol_nvm_bits;
 	u16 link_status;
 	int err = 0;
 	u32 len;
@@ -9935,15 +9996,15 @@
 	hw = &pf->hw;
 	hw->back = pf;
 
-	ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
-			    I40E_MAX_CSR_SPACE);
+	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
+				I40E_MAX_CSR_SPACE);
 
-	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
+	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
 	if (!hw->hw_addr) {
 		err = -EIO;
 		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
 			 (unsigned int)pci_resource_start(pdev, 0),
-			 (unsigned int)pci_resource_len(pdev, 0), err);
+			 pf->ioremap_len, err);
 		goto err_ioremap;
 	}
 	hw->vendor_id = pdev->vendor;
@@ -10101,10 +10162,13 @@
 	INIT_WORK(&pf->service_task, i40e_service_task);
 	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
 	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
-	pf->link_check_timeout = jiffies;
 
-	/* WoL defaults to disabled */
-	pf->wol_en = false;
+	/* NVM bit on means WoL disabled for the port */
+	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+	if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1)
+		pf->wol_en = false;
+	else
+		pf->wol_en = true;
 	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
 
 	/* set up the main switch operations */
@@ -10436,7 +10500,7 @@
 	int err;
 	u32 reg;
 
-	dev_info(&pdev->dev, "%s\n", __func__);
+	dev_dbg(&pdev->dev, "%s\n", __func__);
 	if (pci_enable_device_mem(pdev)) {
 		dev_info(&pdev->dev,
 			 "Cannot re-enable PCI device after reset.\n");
@@ -10476,13 +10540,13 @@
 {
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
 
-	dev_info(&pdev->dev, "%s\n", __func__);
+	dev_dbg(&pdev->dev, "%s\n", __func__);
 	if (test_bit(__I40E_SUSPENDED, &pf->state))
 		return;
 
 	rtnl_lock();
 	i40e_handle_reset_warning(pf);
-	rtnl_lock();
+	rtnl_unlock();
 }
 
 /**
@@ -10568,9 +10632,7 @@
 
 	err = pci_enable_device_mem(pdev);
 	if (err) {
-		dev_err(&pdev->dev,
-			"%s: Cannot enable PCI device from suspend\n",
-			__func__);
+		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
 		return err;
 	}
 	pci_set_master(pdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 9b83abc..2142e10 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -418,6 +418,10 @@
 				     bool last_command)
 {
 	i40e_status ret_code = I40E_ERR_NVM;
+	struct i40e_asq_cmd_details cmd_details;
+
+	memset(&cmd_details, 0, sizeof(cmd_details));
+	cmd_details.wb_desc = &hw->nvm_wb_desc;
 
 	/* Here we are checking the SR limit only for the flat memory model.
 	 * We cannot do it for the module-based model, as we did not acquire
@@ -443,7 +447,7 @@
 		ret_code = i40e_aq_update_nvm(hw, module_pointer,
 					      2 * offset,  /*bytes*/
 					      2 * words,   /*bytes*/
-					      data, last_command, NULL);
+					      data, last_command, &cmd_details);
 
 	return ret_code;
 }
@@ -543,11 +547,13 @@
 {
 	i40e_status ret_code = 0;
 	u16 checksum;
+	__le16 le_sum;
 
 	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+	le_sum = cpu_to_le16(checksum);
 	if (!ret_code)
 		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
-					     1, &checksum, true);
+					     1, &le_sum, true);
 
 	return ret_code;
 }
@@ -592,25 +598,31 @@
 
 static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
 					  struct i40e_nvm_access *cmd,
-					  u8 *bytes, int *errno);
+					  u8 *bytes, int *perrno);
 static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
 					     struct i40e_nvm_access *cmd,
-					     u8 *bytes, int *errno);
+					     u8 *bytes, int *perrno);
 static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
 					     struct i40e_nvm_access *cmd,
 					     u8 *bytes, int *errno);
 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
 						struct i40e_nvm_access *cmd,
-						int *errno);
+						int *perrno);
 static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
 					 struct i40e_nvm_access *cmd,
-					 int *errno);
+					 int *perrno);
 static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
 					 struct i40e_nvm_access *cmd,
-					 u8 *bytes, int *errno);
+					 u8 *bytes, int *perrno);
 static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
 					struct i40e_nvm_access *cmd,
-					u8 *bytes, int *errno);
+					u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+				       struct i40e_nvm_access *cmd,
+				       u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+					     struct i40e_nvm_access *cmd,
+					     u8 *bytes, int *perrno);
 static inline u8 i40e_nvmupd_get_module(u32 val)
 {
 	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -634,6 +646,9 @@
 	"I40E_NVMUPD_CSUM_CON",
 	"I40E_NVMUPD_CSUM_SA",
 	"I40E_NVMUPD_CSUM_LCB",
+	"I40E_NVMUPD_STATUS",
+	"I40E_NVMUPD_EXEC_AQ",
+	"I40E_NVMUPD_GET_AQ_RESULT",
 };
 
 /**
@@ -641,30 +656,60 @@
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command
  * @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * Dispatches command depending on what update state is current
  **/
 i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
 				struct i40e_nvm_access *cmd,
-				u8 *bytes, int *errno)
+				u8 *bytes, int *perrno)
 {
 	i40e_status status;
+	enum i40e_nvmupd_cmd upd_cmd;
 
 	/* assume success */
-	*errno = 0;
+	*perrno = 0;
+
+	/* early check for status command and debug msgs */
+	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
+		   i40e_nvm_update_state_str[upd_cmd],
+		   hw->nvmupd_state,
+		   hw->aq.nvm_release_on_done);
+
+	if (upd_cmd == I40E_NVMUPD_INVALID) {
+		*perrno = -EFAULT;
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "i40e_nvmupd_validate_command returns %d errno %d\n",
+			   upd_cmd, *perrno);
+	}
+
+	/* a status request returns immediately rather than
+	 * going into the state machine
+	 */
+	if (upd_cmd == I40E_NVMUPD_STATUS) {
+		bytes[0] = hw->nvmupd_state;
+		return 0;
+	}
 
 	switch (hw->nvmupd_state) {
 	case I40E_NVMUPD_STATE_INIT:
-		status = i40e_nvmupd_state_init(hw, cmd, bytes, errno);
+		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
 		break;
 
 	case I40E_NVMUPD_STATE_READING:
-		status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno);
+		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
 		break;
 
 	case I40E_NVMUPD_STATE_WRITING:
-		status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno);
+		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
+		break;
+
+	case I40E_NVMUPD_STATE_INIT_WAIT:
+	case I40E_NVMUPD_STATE_WRITE_WAIT:
+		status = I40E_ERR_NOT_READY;
+		*perrno = -EBUSY;
 		break;
 
 	default:
@@ -672,7 +717,7 @@
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
 		status = I40E_NOT_SUPPORTED;
-		*errno = -ESRCH;
+		*perrno = -ESRCH;
 		break;
 	}
 	return status;
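
The dispatcher above now answers an I40E_NVMUPD_STATUS request immediately with the current state byte and refuses new work with -EBUSY while a previous erase or write is still completing in one of the new *_WAIT states. The following is a small standalone model of that dispatch, with invented enum values and a simplified return convention standing in for the driver's types; it sketches the control flow only and is not the driver code.

#include <errno.h>
#include <stdio.h>

enum upd_state { ST_INIT, ST_READING, ST_WRITING, ST_INIT_WAIT, ST_WRITE_WAIT };
enum upd_cmd   { CMD_STATUS, CMD_READ_SA, CMD_WRITE_SA };

/* Returns 0 on success; sets *perrno the way the driver's dispatcher does. */
static int dispatch(enum upd_state state, enum upd_cmd cmd,
		    unsigned char *out, int *perrno)
{
	*perrno = 0;

	if (cmd == CMD_STATUS) {	/* answered without entering the FSM */
		out[0] = (unsigned char)state;
		return 0;
	}

	switch (state) {
	case ST_INIT:
	case ST_READING:
	case ST_WRITING:
		return 0;		/* would run the per-state handler */
	case ST_INIT_WAIT:
	case ST_WRITE_WAIT:
		*perrno = -EBUSY;	/* previous AQ operation still pending */
		return -1;
	}
	*perrno = -ESRCH;
	return -1;
}

int main(void)
{
	unsigned char b[1];
	int err;

	dispatch(ST_WRITE_WAIT, CMD_STATUS, b, &err);
	printf("status byte %d, err %d\n", b[0], err);	/* 4, 0 */
	dispatch(ST_WRITE_WAIT, CMD_WRITE_SA, b, &err);
	printf("err %d\n", err);			/* -16 (EBUSY) */
	return 0;
}
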
@@ -683,28 +728,28 @@
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
  * @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * Process legitimate commands of the Init state and conditionally set next
  * state. Reject all other commands.
  **/
 static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
 					  struct i40e_nvm_access *cmd,
-					  u8 *bytes, int *errno)
+					  u8 *bytes, int *perrno)
 {
 	i40e_status status = 0;
 	enum i40e_nvmupd_cmd upd_cmd;
 
-	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 
 	switch (upd_cmd) {
 	case I40E_NVMUPD_READ_SA:
 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 		if (status) {
-			*errno = i40e_aq_rc_to_posix(status,
+			*perrno = i40e_aq_rc_to_posix(status,
 						     hw->aq.asq_last_status);
 		} else {
-			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
 			i40e_release_nvm(hw);
 		}
 		break;
@@ -712,10 +757,10 @@
 	case I40E_NVMUPD_READ_SNT:
 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 		if (status) {
-			*errno = i40e_aq_rc_to_posix(status,
+			*perrno = i40e_aq_rc_to_posix(status,
 						     hw->aq.asq_last_status);
 		} else {
-			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
 			if (status)
 				i40e_release_nvm(hw);
 			else
@@ -726,70 +771,83 @@
 	case I40E_NVMUPD_WRITE_ERA:
 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
 		if (status) {
-			*errno = i40e_aq_rc_to_posix(status,
+			*perrno = i40e_aq_rc_to_posix(status,
 						     hw->aq.asq_last_status);
 		} else {
-			status = i40e_nvmupd_nvm_erase(hw, cmd, errno);
-			if (status)
+			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
+			if (status) {
 				i40e_release_nvm(hw);
-			else
+			} else {
 				hw->aq.nvm_release_on_done = true;
+				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+			}
 		}
 		break;
 
 	case I40E_NVMUPD_WRITE_SA:
 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
 		if (status) {
-			*errno = i40e_aq_rc_to_posix(status,
+			*perrno = i40e_aq_rc_to_posix(status,
 						     hw->aq.asq_last_status);
 		} else {
-			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
-			if (status)
+			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+			if (status) {
 				i40e_release_nvm(hw);
-			else
+			} else {
 				hw->aq.nvm_release_on_done = true;
+				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+			}
 		}
 		break;
 
 	case I40E_NVMUPD_WRITE_SNT:
 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
 		if (status) {
-			*errno = i40e_aq_rc_to_posix(status,
+			*perrno = i40e_aq_rc_to_posix(status,
 						     hw->aq.asq_last_status);
 		} else {
-			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
 			if (status)
 				i40e_release_nvm(hw);
 			else
-				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
 		}
 		break;
 
 	case I40E_NVMUPD_CSUM_SA:
 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
 		if (status) {
-			*errno = i40e_aq_rc_to_posix(status,
+			*perrno = i40e_aq_rc_to_posix(status,
 						     hw->aq.asq_last_status);
 		} else {
 			status = i40e_update_nvm_checksum(hw);
 			if (status) {
-				*errno = hw->aq.asq_last_status ?
+				*perrno = hw->aq.asq_last_status ?
 				   i40e_aq_rc_to_posix(status,
 						       hw->aq.asq_last_status) :
 				   -EIO;
 				i40e_release_nvm(hw);
 			} else {
 				hw->aq.nvm_release_on_done = true;
+				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
 			}
 		}
 		break;
 
+	case I40E_NVMUPD_EXEC_AQ:
+		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
+		break;
+
+	case I40E_NVMUPD_GET_AQ_RESULT:
+		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
+		break;
+
 	default:
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "NVMUPD: bad cmd %s in init state\n",
 			   i40e_nvm_update_state_str[upd_cmd]);
 		status = I40E_ERR_NVM;
-		*errno = -ESRCH;
+		*perrno = -ESRCH;
 		break;
 	}
 	return status;
@@ -800,28 +858,28 @@
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
  * @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * NVM ownership is already held.  Process legitimate commands and set any
  * change in state; reject all other commands.
  **/
 static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
 					     struct i40e_nvm_access *cmd,
-					     u8 *bytes, int *errno)
+					     u8 *bytes, int *perrno)
 {
-	i40e_status status;
+	i40e_status status = 0;
 	enum i40e_nvmupd_cmd upd_cmd;
 
-	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 
 	switch (upd_cmd) {
 	case I40E_NVMUPD_READ_SA:
 	case I40E_NVMUPD_READ_CON:
-		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
 		break;
 
 	case I40E_NVMUPD_READ_LCB:
-		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
 		i40e_release_nvm(hw);
 		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 		break;
@@ -831,7 +889,7 @@
 			   "NVMUPD: bad cmd %s in reading state.\n",
 			   i40e_nvm_update_state_str[upd_cmd]);
 		status = I40E_NOT_SUPPORTED;
-		*errno = -ESRCH;
+		*perrno = -ESRCH;
 		break;
 	}
 	return status;
@@ -842,55 +900,68 @@
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
  * @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * NVM ownership is already held.  Process legitimate commands and set any
  * change in state; reject all other commands
  **/
 static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
 					     struct i40e_nvm_access *cmd,
-					     u8 *bytes, int *errno)
+					     u8 *bytes, int *perrno)
 {
-	i40e_status status;
+	i40e_status status = 0;
 	enum i40e_nvmupd_cmd upd_cmd;
 	bool retry_attempt = false;
 
-	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 
 retry:
 	switch (upd_cmd) {
 	case I40E_NVMUPD_WRITE_CON:
-		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+		if (!status)
+			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
 		break;
 
 	case I40E_NVMUPD_WRITE_LCB:
-		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
-		if (!status)
+		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+		if (status) {
+			*perrno = hw->aq.asq_last_status ?
+				   i40e_aq_rc_to_posix(status,
+						       hw->aq.asq_last_status) :
+				   -EIO;
+			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+		} else {
 			hw->aq.nvm_release_on_done = true;
-		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+		}
 		break;
 
 	case I40E_NVMUPD_CSUM_CON:
 		status = i40e_update_nvm_checksum(hw);
 		if (status) {
-			*errno = hw->aq.asq_last_status ?
+			*perrno = hw->aq.asq_last_status ?
 				   i40e_aq_rc_to_posix(status,
 						       hw->aq.asq_last_status) :
 				   -EIO;
 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+		} else {
+			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
 		}
 		break;
 
 	case I40E_NVMUPD_CSUM_LCB:
 		status = i40e_update_nvm_checksum(hw);
-		if (status)
-			*errno = hw->aq.asq_last_status ?
+		if (status) {
+			*perrno = hw->aq.asq_last_status ?
 				   i40e_aq_rc_to_posix(status,
 						       hw->aq.asq_last_status) :
 				   -EIO;
-		else
+			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+		} else {
 			hw->aq.nvm_release_on_done = true;
-		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+		}
 		break;
 
 	default:
@@ -898,7 +969,7 @@
 			   "NVMUPD: bad cmd %s in writing state.\n",
 			   i40e_nvm_update_state_str[upd_cmd]);
 		status = I40E_NOT_SUPPORTED;
-		*errno = -ESRCH;
+		*perrno = -ESRCH;
 		break;
 	}
 
@@ -941,21 +1012,22 @@
  * i40e_nvmupd_validate_command - Validate given command
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * Return one of the valid command types or I40E_NVMUPD_INVALID
  **/
 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
 						 struct i40e_nvm_access *cmd,
-						 int *errno)
+						 int *perrno)
 {
 	enum i40e_nvmupd_cmd upd_cmd;
-	u8 transaction;
+	u8 module, transaction;
 
 	/* anything that doesn't match a recognized case is an error */
 	upd_cmd = I40E_NVMUPD_INVALID;
 
 	transaction = i40e_nvmupd_get_transaction(cmd->config);
+	module = i40e_nvmupd_get_module(cmd->config);
 
 	/* limits on data size */
 	if ((cmd->data_size < 1) ||
@@ -963,7 +1035,7 @@
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "i40e_nvmupd_validate_command data_size %d\n",
 			   cmd->data_size);
-		*errno = -EFAULT;
+		*perrno = -EFAULT;
 		return I40E_NVMUPD_INVALID;
 	}
 
@@ -982,6 +1054,12 @@
 		case I40E_NVM_SA:
 			upd_cmd = I40E_NVMUPD_READ_SA;
 			break;
+		case I40E_NVM_EXEC:
+			if (module == 0xf)
+				upd_cmd = I40E_NVMUPD_STATUS;
+			else if (module == 0)
+				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+			break;
 		}
 		break;
 
@@ -1011,36 +1089,171 @@
 		case (I40E_NVM_CSUM|I40E_NVM_LCB):
 			upd_cmd = I40E_NVMUPD_CSUM_LCB;
 			break;
+		case I40E_NVM_EXEC:
+			if (module == 0)
+				upd_cmd = I40E_NVMUPD_EXEC_AQ;
+			break;
 		}
 		break;
 	}
-	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
-		   i40e_nvm_update_state_str[upd_cmd],
-		   hw->nvmupd_state,
-		   hw->aq.nvm_release_on_done);
 
-	if (upd_cmd == I40E_NVMUPD_INVALID) {
-		*errno = -EFAULT;
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "i40e_nvmupd_validate_command returns %d errno %d\n",
-			   upd_cmd, *errno);
-	}
 	return upd_cmd;
 }
 
 /**
+ * i40e_nvmupd_exec_aq - Run an AQ command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+				       struct i40e_nvm_access *cmd,
+				       u8 *bytes, int *perrno)
+{
+	struct i40e_asq_cmd_details cmd_details;
+	i40e_status status;
+	struct i40e_aq_desc *aq_desc;
+	u32 buff_size = 0;
+	u8 *buff = NULL;
+	u32 aq_desc_len;
+	u32 aq_data_len;
+
+	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+	memset(&cmd_details, 0, sizeof(cmd_details));
+	cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+	aq_desc_len = sizeof(struct i40e_aq_desc);
+	memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+
+	/* get the aq descriptor */
+	if (cmd->data_size < aq_desc_len) {
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
+			   cmd->data_size, aq_desc_len);
+		*perrno = -EINVAL;
+		return I40E_ERR_PARAM;
+	}
+	aq_desc = (struct i40e_aq_desc *)bytes;
+
+	/* if data buffer needed, make sure it's ready */
+	aq_data_len = cmd->data_size - aq_desc_len;
+	buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
+	if (buff_size) {
+		if (!hw->nvm_buff.va) {
+			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
+							hw->aq.asq_buf_size);
+			if (status)
+				i40e_debug(hw, I40E_DEBUG_NVM,
+					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
+					   status);
+		}
+
+		if (hw->nvm_buff.va) {
+			buff = hw->nvm_buff.va;
+			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+		}
+	}
+
+	/* and away we go! */
+	status = i40e_asq_send_command(hw, aq_desc, buff,
+				       buff_size, &cmd_details);
+	if (status) {
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
+			   i40e_stat_str(hw, status),
+			   i40e_aq_str(hw, hw->aq.asq_last_status));
+		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+					     struct i40e_nvm_access *cmd,
+					     u8 *bytes, int *perrno)
+{
+	u32 aq_total_len;
+	u32 aq_desc_len;
+	int remainder;
+	u8 *buff;
+
+	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+	aq_desc_len = sizeof(struct i40e_aq_desc);
+	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
+
+	/* check offset range */
+	if (cmd->offset > aq_total_len) {
+		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
+			   __func__, cmd->offset, aq_total_len);
+		*perrno = -EINVAL;
+		return I40E_ERR_PARAM;
+	}
+
+	/* check copylength range */
+	if (cmd->data_size > (aq_total_len - cmd->offset)) {
+		int new_len = aq_total_len - cmd->offset;
+
+		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
+			   __func__, cmd->data_size, new_len);
+		cmd->data_size = new_len;
+	}
+
+	remainder = cmd->data_size;
+	if (cmd->offset < aq_desc_len) {
+		u32 len = aq_desc_len - cmd->offset;
+
+		len = min(len, cmd->data_size);
+		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
+			   __func__, cmd->offset, cmd->offset + len);
+
+		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
+		memcpy(bytes, buff, len);
+
+		bytes += len;
+		remainder -= len;
+		buff = hw->nvm_buff.va;
+	} else {
+		buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+	}
+
+	if (remainder > 0) {
+		int start_byte = buff - (u8 *)hw->nvm_buff.va;
+
+		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
+			   __func__, start_byte, start_byte + remainder);
+		memcpy(bytes, buff, remainder);
+	}
+
+	return 0;
+}
+
+/**
  * i40e_nvmupd_nvm_read - Read NVM
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
  * @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * cmd structure contains identifiers and data buffer
  **/
 static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
 					struct i40e_nvm_access *cmd,
-					u8 *bytes, int *errno)
+					u8 *bytes, int *perrno)
 {
+	struct i40e_asq_cmd_details cmd_details;
 	i40e_status status;
 	u8 module, transaction;
 	bool last;
@@ -1049,8 +1262,11 @@
 	module = i40e_nvmupd_get_module(cmd->config);
 	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
 
+	memset(&cmd_details, 0, sizeof(cmd_details));
+	cmd_details.wb_desc = &hw->nvm_wb_desc;
+
 	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
-				  bytes, last, NULL);
+				  bytes, last, &cmd_details);
 	if (status) {
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
@@ -1058,7 +1274,7 @@
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "i40e_nvmupd_nvm_read status %d aq %d\n",
 			   status, hw->aq.asq_last_status);
-		*errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
 	}
 
 	return status;
@@ -1068,23 +1284,28 @@
  * i40e_nvmupd_nvm_erase - Erase an NVM module
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * module, offset, data_size and data are in cmd structure
  **/
 static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
 					 struct i40e_nvm_access *cmd,
-					 int *errno)
+					 int *perrno)
 {
 	i40e_status status = 0;
+	struct i40e_asq_cmd_details cmd_details;
 	u8 module, transaction;
 	bool last;
 
 	transaction = i40e_nvmupd_get_transaction(cmd->config);
 	module = i40e_nvmupd_get_module(cmd->config);
 	last = (transaction & I40E_NVM_LCB);
+
+	memset(&cmd_details, 0, sizeof(cmd_details));
+	cmd_details.wb_desc = &hw->nvm_wb_desc;
+
 	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
-				   last, NULL);
+				   last, &cmd_details);
 	if (status) {
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
@@ -1092,7 +1313,7 @@
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
 			   status, hw->aq.asq_last_status);
-		*errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
 	}
 
 	return status;
@@ -1103,15 +1324,16 @@
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
  * @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * module, offset, data_size and data are in cmd structure
  **/
 static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
 					 struct i40e_nvm_access *cmd,
-					 u8 *bytes, int *errno)
+					 u8 *bytes, int *perrno)
 {
 	i40e_status status = 0;
+	struct i40e_asq_cmd_details cmd_details;
 	u8 module, transaction;
 	bool last;
 
@@ -1119,8 +1341,12 @@
 	module = i40e_nvmupd_get_module(cmd->config);
 	last = (transaction & I40E_NVM_LCB);
 
+	memset(&cmd_details, 0, sizeof(cmd_details));
+	cmd_details.wb_desc = &hw->nvm_wb_desc;
+
 	status = i40e_aq_update_nvm(hw, module, cmd->offset,
-				    (u16)cmd->data_size, bytes, last, NULL);
+				    (u16)cmd->data_size, bytes, last,
+				    &cmd_details);
 	if (status) {
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
@@ -1128,7 +1354,7 @@
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "i40e_nvmupd_nvm_write status %d aq %d\n",
 			   status, hw->aq.asq_last_status);
-		*errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
 	}
 
 	return status;
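
i40e_nvmupd_get_aq_result copies the caller-requested window out of two adjacent regions: the cached write-back descriptor (hw->nvm_wb_desc) followed by the separately allocated data buffer (hw->nvm_buff). The offset/length split can be modelled in isolation as below; the 32-byte descriptor size, the buffer contents and the helper name are placeholders, not the driver's actual structures.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DESC_LEN 32u	/* stand-in for sizeof(struct i40e_aq_desc) */

static void copy_result(const uint8_t *desc, const uint8_t *data,
			uint32_t data_len, uint32_t offset, uint32_t size,
			uint8_t *out)
{
	uint32_t total = DESC_LEN + data_len;
	uint32_t remainder;

	if (offset > total)
		return;				/* offset out of range */
	if (size > total - offset)
		size = total - offset;		/* trim, as the driver does */

	remainder = size;
	if (offset < DESC_LEN) {
		uint32_t len = DESC_LEN - offset;

		if (len > size)
			len = size;
		memcpy(out, desc + offset, len);	/* descriptor part */
		out += len;
		remainder -= len;
		/* remaining bytes come from the start of the data buffer */
	} else {
		data += offset - DESC_LEN;		/* skip into data buffer */
	}

	if (remainder)
		memcpy(out, data, remainder);
}

int main(void)
{
	uint8_t desc[DESC_LEN], data[64], out[96];

	memset(desc, 0xAA, sizeof(desc));
	memset(data, 0xBB, sizeof(data));
	copy_result(desc, data, sizeof(data), 16, 40, out);
	printf("%02x %02x\n", out[15], out[16]);	/* aa bb: the split point */
	return 0;
}
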
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index dcb72a8..e51e156 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -258,7 +258,7 @@
 i40e_status i40e_pf_reset(struct i40e_hw *hw);
 void i40e_clear_hw(struct i40e_hw *hw);
 void i40e_clear_pxe_mode(struct i40e_hw *hw);
-bool i40e_get_link_status(struct i40e_hw *hw);
+i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
 				      u32 *max_bw, u32 *min_bw, bool *min_valid,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 8c40d6e..552c84e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -618,9 +618,8 @@
 
 	/* Attempt to register the clock before enabling the hardware. */
 	pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
-	if (IS_ERR(pf->ptp_clock)) {
+	if (IS_ERR(pf->ptp_clock))
 		return PTR_ERR(pf->ptp_clock);
-	}
 
 	/* clear the hwtstamp settings here during clock create, instead of
 	 * during regular init, so that we can maintain settings across a
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 738aca6..01e5ece 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -465,10 +465,11 @@
 		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
 
 	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
 		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
 			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
-				 rx_desc->wb.qword0.hi_dword.fd_id);
+				 pf->fd_inv);
 
 		/* Check if the programming error is for ATR.
 		 * If so, auto disable ATR and set a state for
@@ -601,27 +602,13 @@
 }
 
 /**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring:  tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-	return le32_to_cpu(*(volatile __le32 *)head);
-}
-
-/**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
  *
  * Since there is no access to the ring head register
  * in XL710, we need to use our local copies
  **/
-static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
 	u32 head, tail;
 
@@ -635,50 +622,6 @@
 	return 0;
 }
 
-/**
- * i40e_check_tx_hang - Is there a hang in the Tx queue
- * @tx_ring: the ring of descriptors
- **/
-static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
-{
-	u32 tx_done = tx_ring->stats.packets;
-	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
-	u32 tx_pending = i40e_get_tx_pending(tx_ring);
-	struct i40e_pf *pf = tx_ring->vsi->back;
-	bool ret = false;
-
-	clear_check_for_tx_hang(tx_ring);
-
-	/* Check for a hung queue, but be thorough. This verifies
-	 * that a transmit has been completed since the previous
-	 * check AND there is at least one packet pending. The
-	 * ARMED bit is set to indicate a potential hang. The
-	 * bit is cleared if a pause frame is received to remove
-	 * false hang detection due to PFC or 802.3x frames. By
-	 * requiring this to fail twice we avoid races with
-	 * PFC clearing the ARMED bit and conditions where we
-	 * run the check_tx_hang logic with a transmit completion
-	 * pending but without time to complete it yet.
-	 */
-	if ((tx_done_old == tx_done) && tx_pending) {
-		/* make sure it is true for two checks in a row */
-		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
-				       &tx_ring->state);
-	} else if (tx_done_old == tx_done &&
-		   (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
-		if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
-			dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
-				 tx_pending, tx_ring->queue_index);
-		pf->tx_sluggish_count++;
-	} else {
-		/* update completed stats and disarm the hang check */
-		tx_ring->tx_stats.tx_done_old = tx_done;
-		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
-	}
-
-	return ret;
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -784,42 +727,21 @@
 	tx_ring->q_vector->tx.total_bytes += total_bytes;
 	tx_ring->q_vector->tx.total_packets += total_packets;
 
-	/* check to see if there are any non-cache aligned descriptors
-	 * waiting to be written back, and kick the hardware to force
-	 * them to be written back in case of napi polling
-	 */
-	if (budget &&
-	    !((i & WB_STRIDE) == WB_STRIDE) &&
-	    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
-	    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
-		tx_ring->arm_wb = true;
-	else
-		tx_ring->arm_wb = false;
+	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+		unsigned int j = 0;
 
-	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
-		/* schedule immediate reset if we believe we hung */
-		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
-			 "  VSI                  <%d>\n"
-			 "  Tx Queue             <%d>\n"
-			 "  next_to_use          <%x>\n"
-			 "  next_to_clean        <%x>\n",
-			 tx_ring->vsi->seid,
-			 tx_ring->queue_index,
-			 tx_ring->next_to_use, i);
-
-		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-
-		dev_info(tx_ring->dev,
-			 "tx hang detected on queue %d, reset requested\n",
-			 tx_ring->queue_index);
-
-		/* do not fire the reset immediately, wait for the stack to
-		 * decide we are truly stuck, also prevents every queue from
-		 * simultaneously requesting a reset
+		/* check to see if there are < 4 descriptors
+		 * waiting to be written back; if so, kick the hardware to
+		 * force them to be written back in case we stay in NAPI.
+		 * In this mode on X722 we do not enable interrupts.
 		 */
+		j = i40e_get_tx_pending(tx_ring);
 
-		/* the adapter is about to reset, no point in enabling polling */
-		budget = 1;
+		if (budget &&
+		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
+		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+			tx_ring->arm_wb = true;
 	}
 
 	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@@ -851,7 +773,7 @@
  * @q_vector: the vector  on which to force writeback
  *
  **/
-static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
 	u16 flags = q_vector->tx.ring[0].flags;
 
@@ -1002,6 +924,8 @@
 	if (!dev)
 		return -ENOMEM;
 
+	/* warn if we are about to overwrite the pointer */
+	WARN_ON(tx_ring->tx_bi);
 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
 	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
 	if (!tx_ring->tx_bi)
@@ -1162,6 +1086,8 @@
 	struct device *dev = rx_ring->dev;
 	int bi_size;
 
+	/* warn if we are about to overwrite the pointer */
+	WARN_ON(rx_ring->rx_bi);
 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
 	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
 	if (!rx_ring->rx_bi)
@@ -1518,7 +1444,7 @@
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	const int current_node = numa_node_id();
+	const int current_node = numa_mem_id();
 	struct i40e_vsi *vsi = rx_ring->vsi;
 	u16 i = rx_ring->next_to_clean;
 	union i40e_rx_desc *rx_desc;
@@ -1596,6 +1522,7 @@
 		cleaned_count++;
 		if (rx_hbo || rx_sph) {
 			int len;
+
 			if (rx_hbo)
 				len = I40E_RX_HDR_SIZE;
 			else
@@ -1781,9 +1708,6 @@
 		/* ERR_MASK will only have valid bits if EOP set */
 		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
-			/* TODO: shouldn't we increment a counter indicating the
-			 * drop?
-			 */
 			continue;
 		}
 
@@ -1862,8 +1786,7 @@
 		if (!test_bit(__I40E_DOWN, &vsi->state))
 			wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
 	} else {
-		i40e_irq_dynamic_enable(vsi,
-					q_vector->v_idx + vsi->base_vector);
+		i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
 	}
 	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
 		old_itr = q_vector->tx.itr;
@@ -1885,8 +1808,7 @@
 			wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
 			      vsi->base_vector - 1), val);
 	} else {
-		i40e_irq_dynamic_enable(vsi,
-					q_vector->v_idx + vsi->base_vector);
+		i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
 	}
 }
 
@@ -1921,6 +1843,7 @@
 	i40e_for_each_ring(ring, q_vector->tx) {
 		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
 		arm_wb |= ring->arm_wb;
+		ring->arm_wb = false;
 	}
 
 	/* We attempt to distribute budget to each Rx queue fairly, but don't
@@ -2156,6 +2079,7 @@
 	/* else if it is a SW VLAN, check the next protocol and store the tag */
 	} else if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_hdr *vhdr, _vhdr;
+
 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
 		if (!vhdr)
 			return -EINVAL;
@@ -2324,6 +2248,9 @@
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
 			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
+		case IPPROTO_GRE:
+			l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
+			break;
 		default:
 			return;
 		}
@@ -2581,6 +2508,9 @@
 	u32 td_tag = 0;
 	dma_addr_t dma;
 	u16 gso_segs;
+	u16 desc_count = 0;
+	bool tail_bump = true;
+	bool do_rs = false;
 
 	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
 		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2621,6 +2551,8 @@
 
 			tx_desc++;
 			i++;
+			desc_count++;
+
 			if (i == tx_ring->count) {
 				tx_desc = I40E_TX_DESC(tx_ring, 0);
 				i = 0;
@@ -2640,6 +2572,8 @@
 
 		tx_desc++;
 		i++;
+		desc_count++;
+
 		if (i == tx_ring->count) {
 			tx_desc = I40E_TX_DESC(tx_ring, 0);
 			i = 0;
@@ -2654,34 +2588,6 @@
 		tx_bi = &tx_ring->tx_bi[i];
 	}
 
-	/* Place RS bit on last descriptor of any packet that spans across the
-	 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
-	 */
-	if (((i & WB_STRIDE) != WB_STRIDE) &&
-	    (first <= &tx_ring->tx_bi[i]) &&
-	    (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
-		tx_desc->cmd_type_offset_bsz =
-			build_ctob(td_cmd, td_offset, size, td_tag) |
-			cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
-					 I40E_TXD_QW1_CMD_SHIFT);
-	} else {
-		tx_desc->cmd_type_offset_bsz =
-			build_ctob(td_cmd, td_offset, size, td_tag) |
-			cpu_to_le64((u64)I40E_TXD_CMD <<
-					 I40E_TXD_QW1_CMD_SHIFT);
-	}
-
-	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
-						 tx_ring->queue_index),
-			     first->bytecount);
-
-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-
 	/* set next_to_watch value indicating a packet is present */
 	first->next_to_watch = tx_desc;
 
@@ -2691,15 +2597,72 @@
 
 	tx_ring->next_to_use = i;
 
+	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+						 tx_ring->queue_index),
+						 first->bytecount);
 	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	/* Algorithm to optimize tail and RS bit setting:
+	 * if xmit_more is supported
+	 *	if xmit_more is true
+	 *		do not update tail and do not mark RS bit.
+	 *	if xmit_more is false and last_xmit_more was false
+	 *		if every packet spanned less than 4 desc
+	 *			then set RS bit on 4th packet and update tail
+	 *			on every packet
+	 *		else
+	 *			update tail and set RS bit on every packet.
+	 *	if xmit_more is false and last_xmit_more was true
+	 *		update tail and set RS bit.
+	 *
+	 * Optimization: wmb to be issued only in case of tail update.
+	 * Also optimize the Descriptor WB path for RS bit with the same
+	 * algorithm.
+	 *
+	 * Note: If there are fewer than 4 packets
+	 * pending and interrupts were disabled the service task will
+	 * trigger a force WB.
+	 */
+	if (skb->xmit_more &&
+	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						    tx_ring->queue_index))) {
+		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+		tail_bump = false;
+	} else if (!skb->xmit_more &&
+		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						       tx_ring->queue_index)) &&
+		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
+		   (tx_ring->packet_stride < WB_STRIDE) &&
+		   (desc_count < WB_STRIDE)) {
+		tx_ring->packet_stride++;
+	} else {
+		tx_ring->packet_stride = 0;
+		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+		do_rs = true;
+	}
+	if (do_rs)
+		tx_ring->packet_stride = 0;
+
+	tx_desc->cmd_type_offset_bsz =
+			build_ctob(td_cmd, td_offset, size, td_tag) |
+			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
+						  I40E_TX_DESC_CMD_EOP) <<
+						  I40E_TXD_QW1_CMD_SHIFT);
+
 	/* notify HW of packet */
-	if (!skb->xmit_more ||
-	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-						   tx_ring->queue_index)))
-		writel(i, tx_ring->tail);
-	else
+	if (!tail_bump)
 		prefetchw(tx_desc + 1);
 
+	if (tail_bump) {
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, tx_ring->tail);
+	}
+
 	return;
 
 dma_error:
@@ -2776,6 +2739,7 @@
 	u8 hdr_len = 0;
 	int tsyn;
 	int tso;
+
 	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
 		return NETDEV_TX_BUSY;
 
@@ -2808,10 +2772,11 @@
 	if (tsyn)
 		tx_flags |= I40E_TX_FLAGS_TSYN;
 
-	if (i40e_chk_linearize(skb, tx_flags))
+	if (i40e_chk_linearize(skb, tx_flags)) {
 		if (skb_linearize(skb))
 			goto out_drop;
-
+		tx_ring->tx_stats.tx_linearize++;
+	}
 	skb_tx_timestamp(skb);
 
 	/* always enable CRC insertion offload */
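
The comment block added to i40e_tx_map above describes when the tail register is written and when the RS bit is set, driven by skb->xmit_more, the previous xmit_more state and how many descriptors recent packets consumed. A minimal standalone model of that decision follows; the struct and field names are simplified stand-ins (the real code keys off I40E_TXR_FLAGS_LAST_XMIT_MORE_SET and tx_ring->packet_stride), so treat it as a sketch of the algorithm, not the driver function.

#include <stdbool.h>
#include <stdio.h>

#define WB_STRIDE 0x3

struct txr_model {
	bool last_xmit_more;	/* stand-in for I40E_TXR_FLAGS_LAST_XMIT_MORE_SET */
	unsigned int packet_stride;
};

/* Returns true when the tail register should be written; *do_rs says whether
 * the last descriptor gets the full RS command or only EOP.
 */
static bool tail_and_rs(struct txr_model *r, bool xmit_more, bool queue_stopped,
			unsigned int desc_count, bool *do_rs)
{
	*do_rs = false;

	if (xmit_more && !queue_stopped) {
		r->last_xmit_more = true;
		return false;			/* defer the tail bump */
	}
	if (!xmit_more && !queue_stopped && !r->last_xmit_more &&
	    r->packet_stride < WB_STRIDE && desc_count < WB_STRIDE) {
		r->packet_stride++;		/* small packet: EOP only */
	} else {
		r->packet_stride = 0;
		r->last_xmit_more = false;
		*do_rs = true;			/* force a descriptor write-back */
	}
	return true;				/* write the tail register */
}

int main(void)
{
	struct txr_model r = { false, 0 };
	bool rs;

	printf("%d\n", tail_and_rs(&r, true, false, 1, &rs));		/* 0 */
	printf("%d %d\n", tail_and_rs(&r, false, false, 1, &rs), rs);	/* 1 1 */
	return 0;
}
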
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index f1385a1..75cecfa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -79,12 +79,12 @@
 	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
 #define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
-	BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
-	BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
-	BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
-	BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
-	BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
-	BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
 
 #define i40e_pf_get_default_rss_hena(pf) \
 	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
@@ -165,6 +165,7 @@
 	};
 	unsigned int bytecount;
 	unsigned short gso_segs;
+
 	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
 	u32 tx_flags;
@@ -188,6 +189,7 @@
 	u64 restart_queue;
 	u64 tx_busy;
 	u64 tx_done_old;
+	u64 tx_linearize;
 };
 
 struct i40e_rx_queue_stats {
@@ -199,8 +201,6 @@
 enum i40e_ring_state_t {
 	__I40E_TX_FDIR_INIT_DONE,
 	__I40E_TX_XPS_INIT_DONE,
-	__I40E_TX_DETECT_HANG,
-	__I40E_HANG_CHECK_ARMED,
 	__I40E_RX_PS_ENABLED,
 	__I40E_RX_16BYTE_DESC_ENABLED,
 };
@@ -211,12 +211,6 @@
 	set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
 #define clear_ring_ps_enabled(ring) \
 	clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define check_for_tx_hang(ring) \
-	test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
-	set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
-	clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
 #define ring_is_16byte_desc_enabled(ring) \
 	test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
 #define set_ring_16byte_desc_enabled(ring) \
@@ -264,10 +258,12 @@
 
 	bool ring_active;		/* is ring online or not */
 	bool arm_wb;		/* do something to arm write back */
+	u8 packet_stride;
 
 	u16 flags;
 #define I40E_TXR_FLAGS_WB_ON_ITR	BIT(0)
 #define I40E_TXR_FLAGS_OUTER_UDP_CSUM	BIT(1)
+#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
 
 	/* stats structs */
 	struct i40e_queue_stats	stats;
@@ -326,4 +322,20 @@
 int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 			       struct i40e_ring *tx_ring, u32 *flags);
 #endif
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
+u32 i40e_get_tx_pending(struct i40e_ring *ring);
+
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring:  tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+	return le32_to_cpu(*(volatile __le32 *)head);
+}
 #endif /* _I40E_TXRX_H_ */
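
i40e_get_head, now exported through this header, reads the Tx head from the write-back slot placed just past the last descriptor, and i40e_get_tx_pending derives the number of outstanding descriptors from that head and the software tail with wrap-around. The arithmetic in isolation, with a hypothetical ring size, is sketched below.

#include <stdio.h>

/* Outstanding descriptors between a write-back head and the software tail on
 * a ring of 'count' entries; mirrors the head/tail arithmetic used above.
 */
static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int count)
{
	if (head == tail)
		return 0;
	return head < tail ? tail - head : tail + count - head;
}

int main(void)
{
	printf("%u\n", tx_pending(10, 50, 512));	/* 40 */
	printf("%u\n", tx_pending(500, 20, 512));	/* 32, wrapped */
	return 0;
}
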
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 4842239..d1ec5a4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -45,6 +45,8 @@
 #define I40E_DEV_ID_QSFP_C		0x1585
 #define I40E_DEV_ID_10G_BASE_T		0x1586
 #define I40E_DEV_ID_20G_KR2		0x1587
+#define I40E_DEV_ID_20G_KR2_A		0x1588
+#define I40E_DEV_ID_10G_BASE_T4		0x1589
 #define I40E_DEV_ID_VF			0x154C
 #define I40E_DEV_ID_VF_HV		0x1571
 #define I40E_DEV_ID_SFP_X722		0x37D0
@@ -158,14 +160,14 @@
 };
 
 enum i40e_vsi_type {
-	I40E_VSI_MAIN = 0,
-	I40E_VSI_VMDQ1,
-	I40E_VSI_VMDQ2,
-	I40E_VSI_CTRL,
-	I40E_VSI_FCOE,
-	I40E_VSI_MIRROR,
-	I40E_VSI_SRIOV,
-	I40E_VSI_FDIR,
+	I40E_VSI_MAIN	= 0,
+	I40E_VSI_VMDQ1	= 1,
+	I40E_VSI_VMDQ2	= 2,
+	I40E_VSI_CTRL	= 3,
+	I40E_VSI_FCOE	= 4,
+	I40E_VSI_MIRROR	= 5,
+	I40E_VSI_SRIOV	= 6,
+	I40E_VSI_FDIR	= 7,
 	I40E_VSI_TYPE_UNKNOWN
 };
 
@@ -305,12 +307,17 @@
 	I40E_NVMUPD_CSUM_CON,
 	I40E_NVMUPD_CSUM_SA,
 	I40E_NVMUPD_CSUM_LCB,
+	I40E_NVMUPD_STATUS,
+	I40E_NVMUPD_EXEC_AQ,
+	I40E_NVMUPD_GET_AQ_RESULT,
 };
 
 enum i40e_nvmupd_state {
 	I40E_NVMUPD_STATE_INIT,
 	I40E_NVMUPD_STATE_READING,
-	I40E_NVMUPD_STATE_WRITING
+	I40E_NVMUPD_STATE_WRITING,
+	I40E_NVMUPD_STATE_INIT_WAIT,
+	I40E_NVMUPD_STATE_WRITE_WAIT,
 };
 
 /* nvm_access definition and its masks/shifts need to be accessible to
@@ -329,6 +336,7 @@
 #define I40E_NVM_SA		(I40E_NVM_SNT | I40E_NVM_LCB)
 #define I40E_NVM_ERA		0x4
 #define I40E_NVM_CSUM		0x8
+#define I40E_NVM_EXEC		0xf
 
 #define I40E_NVM_ADAPT_SHIFT	16
 #define I40E_NVM_ADAPT_MASK	(0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -492,6 +500,8 @@
 
 	/* state of nvm update process */
 	enum i40e_nvmupd_state nvmupd_state;
+	struct i40e_aq_desc nvm_wb_desc;
+	struct i40e_virt_mem nvm_buff;
 
 	/* HMC info */
 	struct i40e_hmc_info hmc; /* HMC info struct */
@@ -500,8 +510,9 @@
 	u16 dcbx_status;
 
 	/* DCBX info */
-	struct i40e_dcbx_config local_dcbx_config;
-	struct i40e_dcbx_config remote_dcbx_config;
+	struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+	struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+	struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
 
 	/* debug mask */
 	u32 debug_mask;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 0f8d415..95d0f8c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -81,7 +81,6 @@
 	I40E_VIRTCHNL_OP_GET_STATS = 15,
 	I40E_VIRTCHNL_OP_FCOE = 16,
 	I40E_VIRTCHNL_OP_EVENT = 17,
-	I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
 };
 
 /* Virtual channel message descriptor. This overlays the admin queue
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index d99c116..ee747dc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -536,6 +536,7 @@
 	}
 	if (type == I40E_VSI_SRIOV) {
 		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
 		vf->lan_vsi_idx = vsi->idx;
 		vf->lan_vsi_id = vsi->id;
 		/* If the port VLAN has been configured and then the
@@ -561,7 +562,7 @@
 	}
 
 	/* program mac filter */
-	ret = i40e_sync_vsi_filters(vsi);
+	ret = i40e_sync_vsi_filters(vsi, false);
 	if (ret)
 		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 
@@ -605,6 +606,7 @@
 	/* map PF queues to VF queues */
 	for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
 		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
+
 		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
 		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
 		total_queue_pairs++;
@@ -872,6 +874,11 @@
 			i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
 					       false);
 
+	for (i = 0; i < pf->num_alloc_vfs; i++)
+		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+			i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+					       false);
+
 	/* Disable IOV before freeing resources. This lets any VF drivers
 	 * running in the host get themselves cleaned up before we yank
 	 * the carpet out from underneath their feet.
@@ -986,24 +993,26 @@
 	int pre_existing_vfs = pci_num_vf(pdev);
 	int err = 0;
 
-	if (pf->state & __I40E_TESTING) {
+	if (test_bit(__I40E_TESTING, &pf->state)) {
 		dev_warn(&pdev->dev,
 			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
 		err = -EPERM;
 		goto err_out;
 	}
 
-	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
 		i40e_free_vfs(pf);
 	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
 		goto out;
 
 	if (num_vfs > pf->num_req_vfs) {
+		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
+			 num_vfs, pf->num_req_vfs);
 		err = -EPERM;
 		goto err_out;
 	}
 
+	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
 	err = i40e_alloc_vfs(pf, num_vfs);
 	if (err) {
 		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
@@ -1201,10 +1210,12 @@
 	if (vf->lan_vsi_idx) {
 		vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
 		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
-		vfres->vsi_res[i].num_queue_pairs =
-		    pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
-		memcpy(vfres->vsi_res[i].default_mac_addr,
-		       vf->default_lan_addr.addr, ETH_ALEN);
+		vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
+		/* VFs only use TC 0 */
+		vfres->vsi_res[i].qset_handle =
+				le16_to_cpu(vsi->info.qs_handle[0]);
+		ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
+				vf->default_lan_addr.addr);
 		i++;
 	}
 	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
@@ -1605,7 +1616,7 @@
 	}
 
 	/* program the updated filter list */
-	if (i40e_sync_vsi_filters(vsi))
+	if (i40e_sync_vsi_filters(vsi, false))
 		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
 
 error_param:
@@ -1656,7 +1667,7 @@
 				I40E_VLAN_ANY, true, false);
 
 	/* program the updated filter list */
-	if (i40e_sync_vsi_filters(vsi))
+	if (i40e_sync_vsi_filters(vsi, false))
 		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
 
 error_param:
@@ -1708,6 +1719,7 @@
 	for (i = 0; i < vfl->num_elements; i++) {
 		/* add new VLAN filter */
 		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+
 		if (ret)
 			dev_err(&pf->pdev->dev,
 				"Unable to add VF vlan filter %d, error %d\n",
@@ -1759,6 +1771,7 @@
 
 	for (i = 0; i < vfl->num_elements; i++) {
 		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+
 		if (ret)
 			dev_err(&pf->pdev->dev,
 				"Unable to delete VF vlan filter %d, error %d\n",
@@ -1870,7 +1883,6 @@
 	case I40E_VIRTCHNL_OP_UNKNOWN:
 	default:
 		return -EPERM;
-		break;
 	}
 	/* few more checks */
 	if ((valid_len != msglen) || (err_msg_format)) {
@@ -2062,7 +2074,7 @@
 
 	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
 	/* program mac filter */
-	if (i40e_sync_vsi_filters(vsi)) {
+	if (i40e_sync_vsi_filters(vsi, false)) {
 		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 		ret = -EIO;
 		goto error_param;
@@ -2089,6 +2101,7 @@
 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
 			      int vf_id, u16 vlan_id, u8 qos)
 {
+	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_pf *pf = np->vsi->back;
 	struct i40e_vsi *vsi;
@@ -2116,8 +2129,7 @@
 		goto error_pvid;
 	}
 
-	if (le16_to_cpu(vsi->info.pvid) ==
-	    (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)))
+	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
 		/* duplicate request, so just return success */
 		goto error_pvid;
 
@@ -2141,7 +2153,7 @@
 	 * MAC addresses deleted.
 	 */
 	if ((!(vlan_id || qos) ||
-	    (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
+	    vlanprio != le16_to_cpu(vsi->info.pvid)) &&
 	    vsi->info.pvid)
 		ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
 
@@ -2156,8 +2168,7 @@
 		}
 	}
 	if (vlan_id || qos)
-		ret = i40e_vsi_add_pvid(vsi,
-				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
+		ret = i40e_vsi_add_pvid(vsi, vlanprio);
 	else
 		i40e_vsi_remove_pvid(vsi);
 
@@ -2310,7 +2321,7 @@
 
 	ivi->vf = vf_id;
 
-	memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
+	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
 
 	ivi->max_tx_rate = vf->tx_rate;
 	ivi->min_tx_rate = 0;
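
i40e_ndo_set_vf_port_vlan now precomputes vlanprio, the 16-bit port-VLAN word that combines the VLAN ID with the 802.1p priority shifted into the upper bits. For illustration only, the sketch below assumes the standard 802.1Q TCI layout (PCP in bits 15:13); the driver uses its own I40E_VLAN_PRIORITY_SHIFT constant for the actual placement.

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIORITY_SHIFT 13	/* assumed: PCP occupies TCI bits 15:13 */

static uint16_t make_vlanprio(uint16_t vlan_id, uint8_t qos)
{
	return (uint16_t)(vlan_id | (qos << VLAN_PRIORITY_SHIFT));
}

int main(void)
{
	printf("0x%04x\n", make_vlanprio(100, 5));	/* 0xa064 */
	return 0;
}
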
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index f08450b..3eba369 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -469,8 +469,12 @@
 {
 	i40e_status ret_code = 0;
 
-	if (hw->aq.asq.count == 0)
-		return I40E_ERR_NOT_READY;
+	mutex_lock(&hw->aq.asq_mutex);
+
+	if (hw->aq.asq.count == 0) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto shutdown_asq_out;
+	}
 
 	/* Stop firmware AdminQ processing */
 	wr32(hw, hw->aq.asq.head, 0);
@@ -479,16 +483,13 @@
 	wr32(hw, hw->aq.asq.bal, 0);
 	wr32(hw, hw->aq.asq.bah, 0);
 
-	/* make sure lock is available */
-	mutex_lock(&hw->aq.asq_mutex);
-
 	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
 
 	/* free ring buffers */
 	i40e_free_asq_bufs(hw);
 
+shutdown_asq_out:
 	mutex_unlock(&hw->aq.asq_mutex);
-
 	return ret_code;
 }
 
@@ -502,8 +503,12 @@
 {
 	i40e_status ret_code = 0;
 
-	if (hw->aq.arq.count == 0)
-		return I40E_ERR_NOT_READY;
+	mutex_lock(&hw->aq.arq_mutex);
+
+	if (hw->aq.arq.count == 0) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto shutdown_arq_out;
+	}
 
 	/* Stop firmware AdminQ processing */
 	wr32(hw, hw->aq.arq.head, 0);
@@ -512,16 +517,13 @@
 	wr32(hw, hw->aq.arq.bal, 0);
 	wr32(hw, hw->aq.arq.bah, 0);
 
-	/* make sure lock is available */
-	mutex_lock(&hw->aq.arq_mutex);
-
 	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
 
 	/* free ring buffers */
 	i40e_free_arq_bufs(hw);
 
+shutdown_arq_out:
 	mutex_unlock(&hw->aq.arq_mutex);
-
 	return ret_code;
 }
 
@@ -596,6 +598,9 @@
 
 	/* destroy the locks */
 
+	if (hw->nvm_buff.va)
+		i40e_free_virt_mem(hw, &hw->nvm_buff);
+
 	return ret_code;
 }
 
@@ -617,8 +622,7 @@
 	details = I40E_ADMINQ_DETAILS(*asq, ntc);
 	while (rd32(hw, hw->aq.asq.head) != ntc) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "%s: ntc %d head %d.\n", __func__, ntc,
-			   rd32(hw, hw->aq.asq.head));
+			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
 
 		if (details->callback) {
 			I40E_ADMINQ_CALLBACK cb_func =
@@ -682,19 +686,23 @@
 	u16  retval = 0;
 	u32  val = 0;
 
-	val = rd32(hw, hw->aq.asq.head);
-	if (val >= hw->aq.num_asq_entries) {
-		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: head overrun at %d\n", val);
-		status = I40E_ERR_QUEUE_EMPTY;
-		goto asq_send_command_exit;
-	}
+	mutex_lock(&hw->aq.asq_mutex);
 
 	if (hw->aq.asq.count == 0) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: Admin queue not initialized.\n");
 		status = I40E_ERR_QUEUE_EMPTY;
-		goto asq_send_command_exit;
+		goto asq_send_command_error;
+	}
+
+	hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+	val = rd32(hw, hw->aq.asq.head);
+	if (val >= hw->aq.num_asq_entries) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: head overrun at %d\n", val);
+		status = I40E_ERR_QUEUE_EMPTY;
+		goto asq_send_command_error;
 	}
 
 	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
@@ -719,8 +727,6 @@
 	desc->flags &= ~cpu_to_le16(details->flags_dis);
 	desc->flags |= cpu_to_le16(details->flags_ena);
 
-	mutex_lock(&hw->aq.asq_mutex);
-
 	if (buff_size > hw->aq.asq_buf_size) {
 		i40e_debug(hw,
 			   I40E_DEBUG_AQ_MESSAGE,
@@ -830,6 +836,10 @@
 	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
 			buff_size);
 
+	/* save writeback aq if requested */
+	if (details->wb_desc)
+		*details->wb_desc = *desc_on_ring;
+
 	/* update the error if time out occurred */
 	if ((!cmd_completed) &&
 	    (!details->async && !details->postpone)) {
@@ -841,7 +851,6 @@
 
 asq_send_command_error:
 	mutex_unlock(&hw->aq.asq_mutex);
-asq_send_command_exit:
 	return status;
 }
 
@@ -887,6 +896,13 @@
 	/* take the lock before we start messing with the ring */
 	mutex_lock(&hw->aq.arq_mutex);
 
+	if (hw->aq.arq.count == 0) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQRX: Admin queue not initialized.\n");
+		ret_code = I40E_ERR_QUEUE_EMPTY;
+		goto clean_arq_element_err;
+	}
+
 	/* set next_to_use to head */
 	ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
 	if (ntu == ntc) {
@@ -948,6 +964,8 @@
 	/* Set pending if needed, unlock and return */
 	if (pending != NULL)
 		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
 	mutex_unlock(&hw->aq.arq_mutex);
 
 	return ret_code;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index ef43d68..a3eae5d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -69,6 +69,7 @@
 	u16 flags_dis;
 	bool async;
 	bool postpone;
+	struct i40e_aq_desc *wb_desc;
 };
 
 #define I40E_ADMINQ_DETAILS(R, i)   \
@@ -108,9 +109,10 @@
 
 /**
  * i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_rc: AdminQ error code to convert
+ * aq_ret: AdminQ handler error code, can override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
  **/
-static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
+static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
 {
 	int aq_to_posix[] = {
 		0,           /* I40E_AQ_RC_OK */
@@ -142,8 +144,9 @@
 	if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
 		return -EAGAIN;
 
-	if (aq_rc >= ARRAY_SIZE(aq_to_posix))
+	if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
 		return -ERANGE;
+
 	return aq_to_posix[aq_rc];
 }
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index d45d0ae..b98b642 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -51,7 +51,9 @@
 		case I40E_DEV_ID_QSFP_B:
 		case I40E_DEV_ID_QSFP_C:
 		case I40E_DEV_ID_10G_BASE_T:
+		case I40E_DEV_ID_10G_BASE_T4:
 		case I40E_DEV_ID_20G_KR2:
+		case I40E_DEV_ID_20G_KR2_A:
 			hw->mac.type = I40E_MAC_XL710;
 			break;
 		case I40E_DEV_ID_SFP_X722:
@@ -990,10 +992,10 @@
 			     I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
 	for (i = 0; i < msg->num_vsis; i++) {
 		if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
-			memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
-			       ETH_ALEN);
-			memcpy(hw->mac.addr, vsi_res->default_mac_addr,
-			       ETH_ALEN);
+			ether_addr_copy(hw->mac.perm_addr,
+					vsi_res->default_mac_addr);
+			ether_addr_copy(hw->mac.addr,
+					vsi_res->default_mac_addr);
 		}
 		vsi_res++;
 	}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 7e91d82..0e71eb4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -140,65 +140,6 @@
 	return le32_to_cpu(*(volatile __le32 *)head);
 }
 
-/**
- * i40e_get_tx_pending - how many tx descriptors not processed
- * @tx_ring: the ring of descriptors
- *
- * Since there is no access to the ring head register
- * in XL710, we need to use our local copies
- **/
-static u32 i40e_get_tx_pending(struct i40e_ring *ring)
-{
-	u32 head, tail;
-
-	head = i40e_get_head(ring);
-	tail = readl(ring->tail);
-
-	if (head != tail)
-		return (head < tail) ?
-			tail - head : (tail + ring->count - head);
-
-	return 0;
-}
-
-/**
- * i40e_check_tx_hang - Is there a hang in the Tx queue
- * @tx_ring: the ring of descriptors
- **/
-static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
-{
-	u32 tx_done = tx_ring->stats.packets;
-	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
-	u32 tx_pending = i40e_get_tx_pending(tx_ring);
-	bool ret = false;
-
-	clear_check_for_tx_hang(tx_ring);
-
-	/* Check for a hung queue, but be thorough. This verifies
-	 * that a transmit has been completed since the previous
-	 * check AND there is at least one packet pending. The
-	 * ARMED bit is set to indicate a potential hang. The
-	 * bit is cleared if a pause frame is received to remove
-	 * false hang detection due to PFC or 802.3x frames. By
-	 * requiring this to fail twice we avoid races with
-	 * PFC clearing the ARMED bit and conditions where we
-	 * run the check_tx_hang logic with a transmit completion
-	 * pending but without time to complete it yet.
-	 */
-	if ((tx_done_old == tx_done) && tx_pending) {
-		/* make sure it is true for two checks in a row */
-		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
-				       &tx_ring->state);
-	} else if (tx_done_old == tx_done &&
-		   (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
-		/* update completed stats and disarm the hang check */
-		tx_ring->tx_stats.tx_done_old = tx_done;
-		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
-	}
-
-	return ret;
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -304,36 +245,15 @@
 	tx_ring->q_vector->tx.total_bytes += total_bytes;
 	tx_ring->q_vector->tx.total_packets += total_packets;
 
+	/* check to see if there are any non-cache aligned descriptors
+	 * waiting to be written back, and kick the hardware to force
+	 * them to be written back in case of napi polling
+	 */
 	if (budget &&
 	    !((i & WB_STRIDE) == WB_STRIDE) &&
 	    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
 	    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
 		tx_ring->arm_wb = true;
-	else
-		tx_ring->arm_wb = false;
-
-	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
-		/* schedule immediate reset if we believe we hung */
-		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
-			 "  VSI                  <%d>\n"
-			 "  Tx Queue             <%d>\n"
-			 "  next_to_use          <%x>\n"
-			 "  next_to_clean        <%x>\n",
-			 tx_ring->vsi->seid,
-			 tx_ring->queue_index,
-			 tx_ring->next_to_use, i);
-
-		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-
-		dev_info(tx_ring->dev,
-			 "tx hang detected on queue %d, resetting adapter\n",
-			 tx_ring->queue_index);
-
-		tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
-
-		/* the adapter is about to reset, no point in enabling stuff */
-		return true;
-	}
 
 	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
 						      tx_ring->queue_index),
@@ -355,16 +275,16 @@
 		}
 	}
 
-	return budget > 0;
+	return !!budget;
 }
 
 /**
- * i40e_force_wb -Arm hardware to do a wb on noncache aligned descriptors
+ * i40evf_force_wb - Arm hardware to do a wb on noncache aligned descriptors
  * @vsi: the VSI we care about
  * @q_vector: the vector  on which to force writeback
  *
  **/
-static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
 	u16 flags = q_vector->tx.ring[0].flags;
 
@@ -997,7 +917,7 @@
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	const int current_node = numa_node_id();
+	const int current_node = numa_mem_id();
 	struct i40e_vsi *vsi = rx_ring->vsi;
 	u16 i = rx_ring->next_to_clean;
 	union i40e_rx_desc *rx_desc;
@@ -1067,6 +987,7 @@
 		cleaned_count++;
 		if (rx_hbo || rx_sph) {
 			int len;
+
 			if (rx_hbo)
 				len = I40E_RX_HDR_SIZE;
 			else
@@ -1240,9 +1161,6 @@
 		/* ERR_MASK will only have valid bits if EOP set */
 		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
-			/* TODO: shouldn't we increment a counter indicating the
-			 * drop?
-			 */
 			continue;
 		}
 
@@ -1366,6 +1284,7 @@
 	i40e_for_each_ring(ring, q_vector->tx) {
 		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
 		arm_wb |= ring->arm_wb;
+		ring->arm_wb = false;
 	}
 
 	/* We attempt to distribute budget to each Rx queue fairly, but don't
@@ -1385,7 +1304,7 @@
 	/* If work not completed, return budget and polling will return */
 	if (!clean_complete) {
 		if (arm_wb)
-			i40e_force_wb(vsi, q_vector);
+			i40evf_force_wb(vsi, q_vector);
 		return budget;
 	}
 
@@ -1437,6 +1356,7 @@
 	/* else if it is a SW VLAN, check the next protocol and store the tag */
 	} else if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_hdr *vhdr, _vhdr;
+
 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
 		if (!vhdr)
 			return -EINVAL;
@@ -1979,6 +1899,7 @@
 	u32 td_cmd = 0;
 	u8 hdr_len = 0;
 	int tso;
+
 	if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
 		return NETDEV_TX_BUSY;
 
@@ -2006,10 +1927,11 @@
 	else if (tso)
 		tx_flags |= I40E_TX_FLAGS_TSO;
 
-	if (i40e_chk_linearize(skb, tx_flags))
+	if (i40e_chk_linearize(skb, tx_flags)) {
 		if (skb_linearize(skb))
 			goto out_drop;
-
+		tx_ring->tx_stats.tx_linearize++;
+	}
 	skb_tx_timestamp(skb);
 
 	/* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 9a30f5d..0c13ece 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -164,6 +164,7 @@
 	};
 	unsigned int bytecount;
 	unsigned short gso_segs;
+
 	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
 	u32 tx_flags;
@@ -187,6 +188,7 @@
 	u64 restart_queue;
 	u64 tx_busy;
 	u64 tx_done_old;
+	u64 tx_linearize;
 };
 
 struct i40e_rx_queue_stats {
@@ -198,8 +200,6 @@
 enum i40e_ring_state_t {
 	__I40E_TX_FDIR_INIT_DONE,
 	__I40E_TX_XPS_INIT_DONE,
-	__I40E_TX_DETECT_HANG,
-	__I40E_HANG_CHECK_ARMED,
 	__I40E_RX_PS_ENABLED,
 	__I40E_RX_16BYTE_DESC_ENABLED,
 };
@@ -210,12 +210,6 @@
 	set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
 #define clear_ring_ps_enabled(ring) \
 	clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define check_for_tx_hang(ring) \
-	test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
-	set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
-	clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
 #define ring_is_16byte_desc_enabled(ring) \
 	test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
 #define set_ring_16byte_desc_enabled(ring) \
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 24a2693..a59b60f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -45,6 +45,8 @@
 #define I40E_DEV_ID_QSFP_C		0x1585
 #define I40E_DEV_ID_10G_BASE_T		0x1586
 #define I40E_DEV_ID_20G_KR2		0x1587
+#define I40E_DEV_ID_20G_KR2_A		0x1588
+#define I40E_DEV_ID_10G_BASE_T4		0x1589
 #define I40E_DEV_ID_VF			0x154C
 #define I40E_DEV_ID_VF_HV		0x1571
 #define I40E_DEV_ID_SFP_X722		0x37D0
@@ -158,14 +160,14 @@
 };
 
 enum i40e_vsi_type {
-	I40E_VSI_MAIN = 0,
-	I40E_VSI_VMDQ1,
-	I40E_VSI_VMDQ2,
-	I40E_VSI_CTRL,
-	I40E_VSI_FCOE,
-	I40E_VSI_MIRROR,
-	I40E_VSI_SRIOV,
-	I40E_VSI_FDIR,
+	I40E_VSI_MAIN	= 0,
+	I40E_VSI_VMDQ1	= 1,
+	I40E_VSI_VMDQ2	= 2,
+	I40E_VSI_CTRL	= 3,
+	I40E_VSI_FCOE	= 4,
+	I40E_VSI_MIRROR	= 5,
+	I40E_VSI_SRIOV	= 6,
+	I40E_VSI_FDIR	= 7,
 	I40E_VSI_TYPE_UNKNOWN
 };
 
@@ -304,12 +306,17 @@
 	I40E_NVMUPD_CSUM_CON,
 	I40E_NVMUPD_CSUM_SA,
 	I40E_NVMUPD_CSUM_LCB,
+	I40E_NVMUPD_STATUS,
+	I40E_NVMUPD_EXEC_AQ,
+	I40E_NVMUPD_GET_AQ_RESULT,
 };
 
 enum i40e_nvmupd_state {
 	I40E_NVMUPD_STATE_INIT,
 	I40E_NVMUPD_STATE_READING,
-	I40E_NVMUPD_STATE_WRITING
+	I40E_NVMUPD_STATE_WRITING,
+	I40E_NVMUPD_STATE_INIT_WAIT,
+	I40E_NVMUPD_STATE_WRITE_WAIT,
 };
 
 /* nvm_access definition and its masks/shifts need to be accessible to
@@ -328,6 +335,7 @@
 #define I40E_NVM_SA		(I40E_NVM_SNT | I40E_NVM_LCB)
 #define I40E_NVM_ERA		0x4
 #define I40E_NVM_CSUM		0x8
+#define I40E_NVM_EXEC		0xf
 
 #define I40E_NVM_ADAPT_SHIFT	16
 #define I40E_NVM_ADAPT_MASK	(0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -486,6 +494,8 @@
 
 	/* state of nvm update process */
 	enum i40e_nvmupd_state nvmupd_state;
+	struct i40e_aq_desc nvm_wb_desc;
+	struct i40e_virt_mem nvm_buff;
 
 	/* HMC info */
 	struct i40e_hmc_info hmc; /* HMC info struct */
@@ -494,8 +504,9 @@
 	u16 dcbx_status;
 
 	/* DCBX info */
-	struct i40e_dcbx_config local_dcbx_config;
-	struct i40e_dcbx_config remote_dcbx_config;
+	struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+	struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+	struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
 
 	/* debug mask */
 	u32 debug_mask;
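
The i40e_type.h hunk above spells out explicit values for enum i40e_vsi_type. A minimal sketch (hypothetical names) of why that matters when an enum's numeric values are part of a PF/VF or firmware interface: an insertion in the middle must not silently renumber everything after it, and a build-time assert catches any accidental shift.

/* Hypothetical illustration only; the real enum and its values differ. */
enum example_vsi_type {
	EXAMPLE_VSI_MAIN  = 0,
	EXAMPLE_VSI_SRIOV = 6,
	EXAMPLE_VSI_FDIR  = 7,
	EXAMPLE_VSI_TYPE_UNKNOWN	/* = 8, derived from the previous member */
};

_Static_assert(EXAMPLE_VSI_TYPE_UNKNOWN == 8, "interface numbering must not shift");

int main(void) { return 0; }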
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index e6db20e..cadda64 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -81,7 +81,6 @@
 	I40E_VIRTCHNL_OP_GET_STATS = 15,
 	I40E_VIRTCHNL_OP_FCOE = 16,
 	I40E_VIRTCHNL_OP_EVENT = 17,
-	I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
 };
 
 /* Virtual channel message descriptor. This overlays the admin queue
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 3817cbb..e7a223e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -48,10 +48,6 @@
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 #define PFX "i40evf: "
-#define DPRINTK(nlevel, klevel, fmt, args...) \
-	((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
-	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
-		__func__ , ## args)))
 
 /* dummy struct to make common code less painful */
 struct i40e_vsi {
@@ -70,6 +66,7 @@
 	 */
 	u16 rx_itr_setting;
 	u16 tx_itr_setting;
+	u16 qs_handle;
 };
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index e85849b..c00e495 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -34,7 +34,7 @@
 static const char i40evf_driver_string[] =
 	"Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "1.3.5"
+#define DRV_VERSION "1.3.13"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
 	"Copyright (c) 2013 - 2015 Intel Corporation.";
@@ -489,8 +489,7 @@
 			q_vector);
 		if (err) {
 			dev_info(&adapter->pdev->dev,
-				 "%s: request_irq failed, error: %d\n",
-				__func__, err);
+				 "Request_irq failed, error: %d\n", err);
 			goto free_queue_irqs;
 		}
 		/* assign the mask for this irq */
@@ -731,6 +730,8 @@
 {
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 
+	if (!VLAN_ALLOWED(adapter))
+		return -EIO;
 	if (i40evf_add_vlan(adapter, vid) == NULL)
 		return -ENOMEM;
 	return 0;
@@ -746,8 +747,11 @@
 {
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 
-	i40evf_del_vlan(adapter, vid);
-	return 0;
+	if (VLAN_ALLOWED(adapter)) {
+		i40evf_del_vlan(adapter, vid);
+		return 0;
+	}
+	return -EIO;
 }
 
 /**
@@ -856,6 +860,7 @@
 	struct i40evf_mac_filter *f, *ftmp;
 	struct netdev_hw_addr *uca;
 	struct netdev_hw_addr *mca;
+	struct netdev_hw_addr *ha;
 	int count = 50;
 
 	/* add addr if not already in the filter list */
@@ -877,29 +882,27 @@
 	}
 	/* remove filter if not in netdev list */
 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
-		bool found = false;
+		netdev_for_each_mc_addr(mca, netdev)
+			if (ether_addr_equal(mca->addr, f->macaddr))
+				goto bottom_of_search_loop;
 
-		if (is_multicast_ether_addr(f->macaddr)) {
-			netdev_for_each_mc_addr(mca, netdev) {
-				if (ether_addr_equal(mca->addr, f->macaddr)) {
-					found = true;
-					break;
-				}
-			}
-		} else {
-			netdev_for_each_uc_addr(uca, netdev) {
-				if (ether_addr_equal(uca->addr, f->macaddr)) {
-					found = true;
-					break;
-				}
-			}
-			if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
-				found = true;
-		}
-		if (!found) {
-			f->remove = true;
-			adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
-		}
+		netdev_for_each_uc_addr(uca, netdev)
+			if (ether_addr_equal(uca->addr, f->macaddr))
+				goto bottom_of_search_loop;
+
+		for_each_dev_addr(netdev, ha)
+			if (ether_addr_equal(ha->addr, f->macaddr))
+				goto bottom_of_search_loop;
+
+		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
+			goto bottom_of_search_loop;
+
+		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
+		f->remove = true;
+		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+
+bottom_of_search_loop:
+		continue;
 	}
 	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 }
@@ -1165,7 +1168,7 @@
 	for (vector = 0; vector < v_budget; vector++)
 		adapter->msix_entries[vector].entry = vector;
 
-	i40evf_acquire_msix_vectors(adapter, v_budget);
+	err = i40evf_acquire_msix_vectors(adapter, v_budget);
 
 out:
 	adapter->netdev->real_num_tx_queues = pairs;
@@ -1421,16 +1424,16 @@
 						      struct i40evf_adapter,
 						      watchdog_task);
 	struct i40e_hw *hw = &adapter->hw;
-	uint32_t rstat_val;
+	u32 reg_val;
 
 	if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
 		goto restart_watchdog;
 
 	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
-		rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
-			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if ((rstat_val == I40E_VFR_VFACTIVE) ||
-		    (rstat_val == I40E_VFR_COMPLETED)) {
+		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
+			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+		if ((reg_val == I40E_VFR_VFACTIVE) ||
+		    (reg_val == I40E_VFR_COMPLETED)) {
 			/* A chance for redemption! */
 			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
 			adapter->state = __I40EVF_STARTUP;
@@ -1455,11 +1458,8 @@
 		goto watchdog_done;
 
 	/* check for reset */
-	rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
-		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
-	    (rstat_val != I40E_VFR_VFACTIVE) &&
-	    (rstat_val != I40E_VFR_COMPLETED)) {
+	reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
+	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
 		adapter->state = __I40EVF_RESETTING;
 		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
 		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
@@ -1574,7 +1574,7 @@
 	struct net_device *netdev = adapter->netdev;
 	struct i40e_hw *hw = &adapter->hw;
 	struct i40evf_mac_filter *f;
-	uint32_t rstat_val;
+	u32 reg_val;
 	int i = 0, err;
 
 	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
@@ -1595,12 +1595,11 @@
 
 	/* poll until we see the reset actually happen */
 	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
-		rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
-			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if ((rstat_val != I40E_VFR_VFACTIVE) &&
-		    (rstat_val != I40E_VFR_COMPLETED))
+		reg_val = rd32(hw, I40E_VF_ARQLEN1) &
+			  I40E_VF_ARQLEN1_ARQENABLE_MASK;
+		if (!reg_val)
 			break;
-		usleep_range(500, 1000);
+		usleep_range(5000, 10000);
 	}
 	if (i == I40EVF_RESET_WAIT_COUNT) {
 		dev_info(&adapter->pdev->dev, "Never saw reset\n");
@@ -1609,9 +1608,9 @@
 
 	/* wait until the reset is complete and the PF is responding to us */
 	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
-		rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
-			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if (rstat_val == I40E_VFR_VFACTIVE)
+		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
+			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+		if (reg_val == I40E_VFR_VFACTIVE)
 			break;
 		msleep(I40EVF_RESET_WAIT_MS);
 	}
@@ -1623,7 +1622,7 @@
 
 		/* reset never finished */
 		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
-			rstat_val);
+			reg_val);
 		adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
 
 		if (netif_running(adapter->netdev)) {
@@ -1853,8 +1852,7 @@
 		if (!err)
 			continue;
 		dev_err(&adapter->pdev->dev,
-			"%s: Allocation for Tx Queue %u failed\n",
-			__func__, i);
+			"Allocation for Tx Queue %u failed\n", i);
 		break;
 	}
 
@@ -1881,8 +1879,7 @@
 		if (!err)
 			continue;
 		dev_err(&adapter->pdev->dev,
-			"%s: Allocation for Rx Queue %u failed\n",
-			__func__, i);
+			"Allocation for Rx Queue %u failed\n", i);
 		break;
 	}
 	return err;
@@ -2118,6 +2115,7 @@
 	adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
 				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
 	adapter->vsi.netdev = adapter->netdev;
+	adapter->vsi.qs_handle = adapter->vsi_res->qset_handle;
 	return 0;
 }
 
@@ -2300,8 +2298,7 @@
 	}
 	return;
 restart:
-	schedule_delayed_work(&adapter->init_task,
-			      msecs_to_jiffies(50));
+	schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
 	return;
 
 err_register:
@@ -2434,7 +2431,8 @@
 	INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
 	INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
 	INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
-	schedule_delayed_work(&adapter->init_task, 10);
+	schedule_delayed_work(&adapter->init_task,
+			      msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
 
 	return 0;
 
@@ -2510,6 +2508,7 @@
 	rtnl_lock();
 	err = i40evf_set_interrupt_capability(adapter);
 	if (err) {
+		rtnl_unlock();
 		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
 		return err;
 	}
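
One i40evf_main.c change above replaces the fixed 10-jiffy probe delay with one derived from the PCI function number, so VFs packed onto one device no longer all schedule init_task at the same instant. A small user-space sketch of the arithmetic; the 5 ms step comes from the patch, the loop bound is only for illustration.

#include <stdio.h>

/* The low three bits of a PCI devfn are the function number, giving
 * co-located VFs staggered 0-35 ms start offsets in 5 ms steps. */
int main(void)
{
	for (unsigned int devfn = 0; devfn < 16; devfn++)
		printf("devfn %2u -> init delay %2u ms\n",
		       devfn, 5 * (devfn & 0x07));
	return 0;
}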
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index d4eb1a5..4f056ef 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -234,8 +234,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
@@ -288,8 +288,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
@@ -313,8 +313,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
@@ -341,8 +341,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
@@ -393,8 +393,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -410,8 +410,7 @@
 	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
 	      (count * sizeof(struct i40e_virtchnl_ether_addr));
 	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
-			 __func__);
+		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
 			sizeof(struct i40e_virtchnl_ether_addr);
@@ -453,8 +452,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -470,8 +469,7 @@
 	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
 	      (count * sizeof(struct i40e_virtchnl_ether_addr));
 	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
-			 __func__);
+		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
 			sizeof(struct i40e_virtchnl_ether_addr);
@@ -513,8 +511,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 
@@ -531,8 +529,7 @@
 	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
 	      (count * sizeof(u16));
 	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
-			 __func__);
+		dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
 			sizeof(u16);
@@ -572,8 +569,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 
@@ -590,8 +587,7 @@
 	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
 	      (count * sizeof(u16));
 	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
-			 __func__);
+		dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
 			sizeof(u16);
@@ -629,8 +625,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
@@ -720,17 +716,16 @@
 			}
 			break;
 		default:
-			dev_err(&adapter->pdev->dev,
-				"%s: Unknown event %d from pf\n",
-				__func__, vpe->event);
+			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
+				vpe->event);
 			break;
 		}
 		return;
 	}
 	if (v_retval) {
-		dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n",
-			__func__, v_retval,
-			i40evf_stat_str(&adapter->hw, v_retval), v_opcode);
+		dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
+			v_retval, i40evf_stat_str(&adapter->hw, v_retval),
+			v_opcode);
 	}
 	switch (v_opcode) {
 	case I40E_VIRTCHNL_OP_GET_STATS: {
@@ -756,6 +751,8 @@
 			  sizeof(struct i40e_virtchnl_vsi_resource);
 		memcpy(adapter->vf_res, msg, min(msglen, len));
 		i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+		/* restore current mac address */
+		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
 		i40evf_process_config(adapter);
 		}
 		break;
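
Several of the i40evf_virtchnl.c hunks above reword the "too many changes" warnings while keeping the existing clamp that trims a filter list to whatever fits in one admin-queue buffer. A standalone sketch of that arithmetic, with assumed sizes (I40EVF_MAX_AQ_BUF_SIZE and the real virtchnl structures differ):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the ether-address list; only the clamping
 * math matters here, not the real layout. */
struct ether_addr_entry { unsigned char addr[6]; unsigned short pad; };
struct ether_addr_list  { unsigned short vsi_id; unsigned short num_elements; };

#define MAX_AQ_BUF_SIZE 4096	/* assumed admin-queue buffer size */

int main(void)
{
	size_t requested = 1000;	/* filters queued for one message */
	size_t count = requested;
	size_t len = sizeof(struct ether_addr_list) +
		     count * sizeof(struct ether_addr_entry);

	if (len > MAX_AQ_BUF_SIZE)
		count = (MAX_AQ_BUF_SIZE - sizeof(struct ether_addr_list)) /
			sizeof(struct ether_addr_entry);

	printf("requested %zu, sending %zu in this message\n", requested, count);
	return 0;
}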
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 212d668..1a2f1cc 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -444,8 +444,8 @@
 
 	struct ptp_pin_desc sdp_config[IGB_N_SDP];
 	struct {
-		struct timespec start;
-		struct timespec period;
+		struct timespec64 start;
+		struct timespec64 period;
 	} perout[IGB_N_PEROUT];
 
 	char fw_version[32];
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e174fbb..7e62675 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2986,6 +2986,9 @@
 	}
 #endif /* CONFIG_PCI_IOV */
 
+	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
+	adapter->flags |= IGB_FLAG_HAS_MSIX;
+
 	igb_probe_vfs(adapter);
 
 	igb_init_queue_configuration(adapter);
@@ -5389,7 +5392,7 @@
 {
 	struct e1000_hw *hw = &adapter->hw;
 	struct ptp_clock_event event;
-	struct timespec ts;
+	struct timespec64 ts;
 	u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
 
 	if (tsicr & TSINTR_SYS_WRAP) {
@@ -5409,10 +5412,11 @@
 
 	if (tsicr & TSINTR_TT0) {
 		spin_lock(&adapter->tmreg_lock);
-		ts = timespec_add(adapter->perout[0].start,
-				  adapter->perout[0].period);
+		ts = timespec64_add(adapter->perout[0].start,
+				    adapter->perout[0].period);
+		/* u32 conversion of tv_sec is safe until y2106 */
 		wr32(E1000_TRGTTIML0, ts.tv_nsec);
-		wr32(E1000_TRGTTIMH0, ts.tv_sec);
+		wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
 		tsauxc = rd32(E1000_TSAUXC);
 		tsauxc |= TSAUXC_EN_TT0;
 		wr32(E1000_TSAUXC, tsauxc);
@@ -5423,10 +5427,10 @@
 
 	if (tsicr & TSINTR_TT1) {
 		spin_lock(&adapter->tmreg_lock);
-		ts = timespec_add(adapter->perout[1].start,
-				  adapter->perout[1].period);
+		ts = timespec64_add(adapter->perout[1].start,
+				    adapter->perout[1].period);
 		wr32(E1000_TRGTTIML1, ts.tv_nsec);
-		wr32(E1000_TRGTTIMH1, ts.tv_sec);
+		wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
 		tsauxc = rd32(E1000_TSAUXC);
 		tsauxc |= TSAUXC_EN_TT1;
 		wr32(E1000_TSAUXC, tsauxc);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 5982f28..c44df87 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -143,7 +143,7 @@
 	 * sub-nanosecond resolution.
 	 */
 	wr32(E1000_SYSTIML, ts->tv_nsec);
-	wr32(E1000_SYSTIMH, ts->tv_sec);
+	wr32(E1000_SYSTIMH, (u32)ts->tv_sec);
 }
 
 /**
@@ -479,7 +479,7 @@
 	struct e1000_hw *hw = &igb->hw;
 	u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout;
 	unsigned long flags;
-	struct timespec ts;
+	struct timespec64 ts;
 	int use_freq = 0, pin = -1;
 	s64 ns;
 
@@ -523,14 +523,14 @@
 		}
 		ts.tv_sec = rq->perout.period.sec;
 		ts.tv_nsec = rq->perout.period.nsec;
-		ns = timespec_to_ns(&ts);
+		ns = timespec64_to_ns(&ts);
 		ns = ns >> 1;
 		if (on && ns <= 70000000LL) {
 			if (ns < 8LL)
 				return -EINVAL;
 			use_freq = 1;
 		}
-		ts = ns_to_timespec(ns);
+		ts = ns_to_timespec64(ns);
 		if (rq->perout.index == 1) {
 			if (use_freq) {
 				tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 686fa71..e86d41e 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2615,6 +2615,7 @@
 	.ndo_poll_controller	= igbvf_netpoll,
 #endif
 	.ndo_set_features	= igbvf_set_features,
+	.ndo_features_check	= passthru_features_check,
 };
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a699c99..dda0f67 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -594,6 +594,7 @@
 
 /* default to trying for four seconds */
 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
+#define IXGBE_SFP_POLL_JIFFIES (2 * HZ)	/* SFP poll every 2 seconds */
 
 /* board specific private data structure */
 struct ixgbe_adapter {
@@ -707,6 +708,7 @@
 
 	u32 link_speed;
 	bool link_up;
+	unsigned long sfp_poll_time;
 	unsigned long link_check_timeout;
 
 	struct timer_list service_timer;
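
IXGBE_SFP_POLL_JIFFIES above caps how often the service task re-probes an absent SFP module: sfp_poll_time is set to jiffies plus the interval and checked with time_after() in ixgbe_main.c further below. A standalone sketch of the wrap-safe comparison that idiom depends on; names and the 250 Hz tick rate are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Signed subtraction of unsigned counters gives a correct "later than"
 * answer even when the counter wraps, which is what time_after() relies on. */
static int after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	uint32_t hz = 250;
	uint32_t now = 0xfffffff0u;		/* counter close to wrapping */
	uint32_t deadline = now + 2 * hz;	/* "poll again in two seconds" */

	printf("too early:     %d\n", after(now, deadline));		/* 0 */
	printf("past deadline: %d\n", after(now + 3 * hz, deadline));	/* 1 */
	return 0;
}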
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index dd7062f..a39afcf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -44,9 +44,8 @@
 static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
 static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
 static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-						 ixgbe_link_speed speed,
-						 bool autoneg_wait_to_complete);
+static void
+ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed);
 static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 					   ixgbe_link_speed speed,
 					   bool autoneg_wait_to_complete);
@@ -109,6 +108,9 @@
 	if (hw->phy.multispeed_fiber) {
 		/* Set up dual speed SFP+ support */
 		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
+		mac->ops.set_rate_select_speed =
+					       ixgbe_set_hard_rate_select_speed;
 	} else {
 		if ((mac->ops.get_media_type(hw) ==
 		     ixgbe_media_type_backplane) &&
@@ -646,176 +648,32 @@
 }
 
 /**
- *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ * ixgbe_set_hard_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
  *
- *  Set the link speed in the AUTOC register and restarts link.
- **/
-static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-					  ixgbe_link_speed speed,
-					  bool autoneg_wait_to_complete)
+ * Set module link speed via RS0/RS1 rate select pins.
+ */
+static void
+ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
 {
-	s32 status = 0;
-	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
-	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
-	u32 speedcnt = 0;
 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
-	u32 i = 0;
-	bool link_up = false;
-	bool autoneg = false;
 
-	/* Mask off requested but non-supported speeds */
-	status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
-						   &autoneg);
-	if (status != 0)
-		return status;
-
-	speed &= link_speed;
-
-	/*
-	 * Try each speed one by one, highest priority first.  We do this in
-	 * software because 10gb fiber doesn't support speed autonegotiation.
-	 */
-	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
-		speedcnt++;
-		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-
-		/* If we already have link at this speed, just jump out */
-		status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
-						false);
-		if (status != 0)
-			return status;
-
-		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
-			goto out;
-
-		/* Set the module link speed */
-		switch (hw->phy.media_type) {
-		case ixgbe_media_type_fiber:
-			esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
-			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-			IXGBE_WRITE_FLUSH(hw);
-			break;
-		case ixgbe_media_type_fiber_qsfp:
-			/* QSFP module automatically detects MAC link speed */
-			break;
-		default:
-			hw_dbg(hw, "Unexpected media type.\n");
-			break;
-		}
-
-		/* Allow module to change analog characteristics (1G->10G) */
-		msleep(40);
-
-		status = ixgbe_setup_mac_link_82599(hw,
-						    IXGBE_LINK_SPEED_10GB_FULL,
-						    autoneg_wait_to_complete);
-		if (status != 0)
-			return status;
-
-		/* Flap the tx laser if it has not already been done */
-		if (hw->mac.ops.flap_tx_laser)
-			hw->mac.ops.flap_tx_laser(hw);
-
-		/*
-		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
-		 * Section 73.10.2, we may have to wait up to 500ms if KR is
-		 * attempted.  82599 uses the same timing for 10g SFI.
-		 */
-		for (i = 0; i < 5; i++) {
-			/* Wait for the link partner to also set speed */
-			msleep(100);
-
-			/* If we have link, just jump out */
-			status = hw->mac.ops.check_link(hw, &link_speed,
-							&link_up, false);
-			if (status != 0)
-				return status;
-
-			if (link_up)
-				goto out;
-		}
+	switch (speed) {
+	case IXGBE_LINK_SPEED_10GB_FULL:
+		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+		break;
+	case IXGBE_LINK_SPEED_1GB_FULL:
+		esdp_reg &= ~IXGBE_ESDP_SDP5;
+		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+		break;
+	default:
+		hw_dbg(hw, "Invalid fixed module speed\n");
+		return;
 	}
 
-	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
-		speedcnt++;
-		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
-			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
-
-		/* If we already have link at this speed, just jump out */
-		status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
-						false);
-		if (status != 0)
-			return status;
-
-		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
-			goto out;
-
-		/* Set the module link speed */
-		switch (hw->phy.media_type) {
-		case ixgbe_media_type_fiber:
-			esdp_reg &= ~IXGBE_ESDP_SDP5;
-			esdp_reg |= IXGBE_ESDP_SDP5_DIR;
-			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-			IXGBE_WRITE_FLUSH(hw);
-			break;
-		case ixgbe_media_type_fiber_qsfp:
-			/* QSFP module automatically detects MAC link speed */
-			break;
-		default:
-			hw_dbg(hw, "Unexpected media type.\n");
-			break;
-		}
-
-		/* Allow module to change analog characteristics (10G->1G) */
-		msleep(40);
-
-		status = ixgbe_setup_mac_link_82599(hw,
-						    IXGBE_LINK_SPEED_1GB_FULL,
-						    autoneg_wait_to_complete);
-		if (status != 0)
-			return status;
-
-		/* Flap the tx laser if it has not already been done */
-		if (hw->mac.ops.flap_tx_laser)
-			hw->mac.ops.flap_tx_laser(hw);
-
-		/* Wait for the link partner to also set speed */
-		msleep(100);
-
-		/* If we have link, just jump out */
-		status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
-						false);
-		if (status != 0)
-			return status;
-
-		if (link_up)
-			goto out;
-	}
-
-	/*
-	 * We didn't get link.  Configure back to the highest speed we tried,
-	 * (if there was more than one).  We call ourselves back with just the
-	 * single highest speed that the user requested.
-	 */
-	if (speedcnt > 1)
-		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
-							       highest_link_speed,
-							       autoneg_wait_to_complete);
-
-out:
-	/* Set autoneg_advertised value based on input link speed */
-	hw->phy.autoneg_advertised = 0;
-
-	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
-	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
-	return status;
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+	IXGBE_WRITE_FLUSH(hw);
 }
 
 /**
@@ -1766,6 +1624,16 @@
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
 
+	/* also use it for SCTP */
+	switch (hw->mac.type) {
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
+		break;
+	default:
+		break;
+	}
+
 	/* store source and destination IP masks (big-endian) */
 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
 			     ~input_mask->formatted.src_ip[0]);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3f56a80..ce61b36 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -297,13 +297,13 @@
 
 	/* Setup flow control */
 	ret_val = ixgbe_setup_fc(hw);
-	if (!ret_val)
-		return 0;
+	if (ret_val)
+		return ret_val;
 
 	/* Clear adapter stopped flag */
 	hw->adapter_stopped = false;
 
-	return ret_val;
+	return 0;
 }
 
 /**
@@ -2164,10 +2164,11 @@
 			/*
 			 * In order to prevent Tx hangs when the internal Tx
 			 * switch is enabled we must set the high water mark
-			 * to the maximum FCRTH value.  This allows the Tx
-			 * switch to function even under heavy Rx workloads.
+			 * to the Rx packet buffer size - 24KB.  This allows
+			 * the Tx switch to function even under heavy Rx
+			 * workloads.
 			 */
-			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
 		}
 
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
@@ -2476,6 +2477,9 @@
 	hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
 	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
 
+	if (hw->mac.type >= ixgbe_mac_X550)
+		return 0;
+
 	/*
 	 * Before proceeding, make sure that the PCIe block does not have
 	 * transactions pending.
@@ -3920,3 +3924,213 @@
 	fwsm &= IXGBE_FWSM_MODE_MASK;
 	return fwsm == IXGBE_FWSM_FW_MODE_PT;
 }
+
+/**
+ *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Set the link speed in the MAC and/or PHY register and restarts link.
+ */
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+					  ixgbe_link_speed speed,
+					  bool autoneg_wait_to_complete)
+{
+	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+	s32 status = 0;
+	u32 speedcnt = 0;
+	u32 i = 0;
+	bool autoneg, link_up = false;
+
+	/* Mask off requested but non-supported speeds */
+	status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
+	if (status)
+		return status;
+
+	speed &= link_speed;
+
+	/* Try each speed one by one, highest priority first.  We do this in
+	 * software because 10Gb fiber doesn't support speed autonegotiation.
+	 */
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+		speedcnt++;
+		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+		/* If we already have link at this speed, just jump out */
+		status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+						false);
+		if (status)
+			return status;
+
+		if (link_speed == IXGBE_LINK_SPEED_10GB_FULL && link_up)
+			goto out;
+
+		/* Set the module link speed */
+		switch (hw->phy.media_type) {
+		case ixgbe_media_type_fiber:
+			hw->mac.ops.set_rate_select_speed(hw,
+						    IXGBE_LINK_SPEED_10GB_FULL);
+			break;
+		case ixgbe_media_type_fiber_qsfp:
+			/* QSFP module automatically detects MAC link speed */
+			break;
+		default:
+			hw_dbg(hw, "Unexpected media type\n");
+			break;
+		}
+
+		/* Allow module to change analog characteristics (1G->10G) */
+		msleep(40);
+
+		status = hw->mac.ops.setup_mac_link(hw,
+						    IXGBE_LINK_SPEED_10GB_FULL,
+						    autoneg_wait_to_complete);
+		if (status)
+			return status;
+
+		/* Flap the Tx laser if it has not already been done */
+		if (hw->mac.ops.flap_tx_laser)
+			hw->mac.ops.flap_tx_laser(hw);
+
+		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
+		 * Section 73.10.2, we may have to wait up to 500ms if KR is
+		 * attempted.  82599 uses the same timing for 10g SFI.
+		 */
+		for (i = 0; i < 5; i++) {
+			/* Wait for the link partner to also set speed */
+			msleep(100);
+
+			/* If we have link, just jump out */
+			status = hw->mac.ops.check_link(hw, &link_speed,
+							&link_up, false);
+			if (status)
+				return status;
+
+			if (link_up)
+				goto out;
+		}
+	}
+
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+		speedcnt++;
+		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+		/* If we already have link at this speed, just jump out */
+		status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+						false);
+		if (status)
+			return status;
+
+		if (link_speed == IXGBE_LINK_SPEED_1GB_FULL && link_up)
+			goto out;
+
+		/* Set the module link speed */
+		switch (hw->phy.media_type) {
+		case ixgbe_media_type_fiber:
+			hw->mac.ops.set_rate_select_speed(hw,
+						     IXGBE_LINK_SPEED_1GB_FULL);
+			break;
+		case ixgbe_media_type_fiber_qsfp:
+			/* QSFP module automatically detects link speed */
+			break;
+		default:
+			hw_dbg(hw, "Unexpected media type\n");
+			break;
+		}
+
+		/* Allow module to change analog characteristics (10G->1G) */
+		msleep(40);
+
+		status = hw->mac.ops.setup_mac_link(hw,
+						    IXGBE_LINK_SPEED_1GB_FULL,
+						    autoneg_wait_to_complete);
+		if (status)
+			return status;
+
+		/* Flap the Tx laser if it has not already been done */
+		if (hw->mac.ops.flap_tx_laser)
+			hw->mac.ops.flap_tx_laser(hw);
+
+		/* Wait for the link partner to also set speed */
+		msleep(100);
+
+		/* If we have link, just jump out */
+		status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+						false);
+		if (status)
+			return status;
+
+		if (link_up)
+			goto out;
+	}
+
+	/* We didn't get link.  Configure back to the highest speed we tried,
+	 * (if there was more than one).  We call ourselves back with just the
+	 * single highest speed that the user requested.
+	 */
+	if (speedcnt > 1)
+		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
+						      highest_link_speed,
+						      autoneg_wait_to_complete);
+
+out:
+	/* Set autoneg_advertised value based on input link speed */
+	hw->phy.autoneg_advertised = 0;
+
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+	return status;
+}
+
+/**
+ *  ixgbe_set_soft_rate_select_speed - Set module link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: link speed to set
+ *
+ *  Set module link speed via the soft rate select.
+ */
+void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
+				      ixgbe_link_speed speed)
+{
+	s32 status;
+	u8 rs, eeprom_data;
+
+	switch (speed) {
+	case IXGBE_LINK_SPEED_10GB_FULL:
+		/* one bit mask same as setting on */
+		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+		break;
+	case IXGBE_LINK_SPEED_1GB_FULL:
+		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+		break;
+	default:
+		hw_dbg(hw, "Invalid fixed module speed\n");
+		return;
+	}
+
+	/* Set RS0 */
+	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+					   IXGBE_I2C_EEPROM_DEV_ADDR2,
+					   &eeprom_data);
+	if (status) {
+		hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
+		return;
+	}
+
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+					    IXGBE_I2C_EEPROM_DEV_ADDR2,
+					    eeprom_data);
+	if (status) {
+		hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
+		return;
+	}
+}
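
The block added to ixgbe_common.c above is the former 82599-only multispeed-fiber routine, now shared: everything board-specific goes through mac.ops.setup_mac_link and mac.ops.set_rate_select_speed. A toy sketch (hypothetical names) of that callback split, with pin-based rate select on one side and an SFF-8472 soft rate-select byte on the other.

#include <stdio.h>

typedef enum { SPEED_1G, SPEED_10G } link_speed_t;

struct mac_ops {
	void (*set_rate_select_speed)(link_speed_t speed);
};

static void hard_rate_select(link_speed_t s)
{
	printf("toggling RS pins for %s\n", s == SPEED_10G ? "10G" : "1G");
}

static void soft_rate_select(link_speed_t s)
{
	printf("writing soft rate-select byte for %s\n",
	       s == SPEED_10G ? "10G" : "1G");
}

static void try_speed(const struct mac_ops *ops, link_speed_t s)
{
	ops->set_rate_select_speed(s);	/* board-specific step */
	/* common steps: set up the MAC link, flap the laser, poll for link */
}

int main(void)
{
	struct mac_ops hard = { hard_rate_select };
	struct mac_ops soft = { soft_rate_select };

	try_speed(&hard, SPEED_10G);
	try_speed(&soft, SPEED_1G);
	return 0;
}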
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2f779f3..a0044e4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -135,6 +135,11 @@
 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+					  ixgbe_link_speed speed,
+					  bool autoneg_wait_to_complete);
+void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
+				      ixgbe_link_speed speed);
 
 #define IXGBE_FAILED_READ_REG 0xffffffffU
 #define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 3b932fe..23277ab 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -259,7 +259,13 @@
 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
 		} else {
-			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+			/* In order to prevent Tx hangs when the internal Tx
+			 * switch is enabled we must set the high water mark
+			 * to the Rx packet buffer size - 24KB.  This allows
+			 * the Tx switch to function even under heavy Rx
+			 * workloads.
+			 */
+			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
 		}
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index acb1b91..1910039 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -79,7 +79,7 @@
 static char ixgbe_default_device_descr[] =
 			      "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "4.0.1-k"
+#define DRV_VERSION "4.2.1-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
 				"Copyright (c) 1999-2015 Intel Corporation.";
@@ -137,6 +137,7 @@
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
 	/* required last entry */
 	{0, }
 };
@@ -1244,9 +1245,12 @@
 				int cpu)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
+	u32 txctrl = 0;
 	u16 reg_offset;
 
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+		txctrl = dca3_get_tag(tx_ring->dev, cpu);
+
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
 		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
@@ -1278,9 +1282,11 @@
 				int cpu)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
+	u32 rxctrl = 0;
 	u8 reg_idx = rx_ring->reg_idx;
 
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+		rxctrl = dca3_get_tag(rx_ring->dev, cpu);
 
 	switch (hw->mac.type) {
 	case ixgbe_mac_82599EB:
@@ -1297,6 +1303,7 @@
 	 * which will cause the DCA tag to be cleared.
 	 */
 	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+		  IXGBE_DCA_RXCTRL_DATA_DCA_EN |
 		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;
 
 	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
@@ -1326,11 +1333,13 @@
 {
 	int i;
 
-	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
-		return;
-
 	/* always use CB2 mode, difference is masked in the CB driver */
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+				IXGBE_DCA_CTRL_DCA_MODE_CB2);
+	else
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+				IXGBE_DCA_CTRL_DCA_DISABLE);
 
 	for (i = 0; i < adapter->num_q_vectors; i++) {
 		adapter->q_vector[i]->cpu = -1;
@@ -1353,7 +1362,8 @@
 			break;
 		if (dca_add_requester(dev) == 0) {
 			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
-			ixgbe_setup_dca(adapter);
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+					IXGBE_DCA_CTRL_DCA_MODE_CB2);
 			break;
 		}
 		/* Fall Through since DCA is disabled. */
@@ -1361,7 +1371,8 @@
 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 			dca_remove_requester(dev);
 			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+					IXGBE_DCA_CTRL_DCA_DISABLE);
 		}
 		break;
 	}
@@ -2509,6 +2520,7 @@
 		IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
 			adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+			adapter->sfp_poll_time = 0;
 			ixgbe_service_event_schedule(adapter);
 		}
 	}
@@ -2631,6 +2643,8 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+			mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
 		if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
 			mask |= IXGBE_EICR_GPI_SDP0_X540;
 		mask |= IXGBE_EIMS_ECC;
@@ -3786,8 +3800,6 @@
 	u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
 
 	switch (hw->mac.type) {
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_82598EB:
 		/*
 		 * For VMDq support of different descriptor types or
@@ -3801,6 +3813,11 @@
 		 */
 		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
 		break;
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		if (adapter->num_vfs)
+			rdrxctl |= IXGBE_RDRXCTL_PSP;
+		/* fall through for older HW */
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
 		/* Disable RSC for ACK packets */
@@ -4776,6 +4793,12 @@
 		break;
 	}
 
+#ifdef CONFIG_IXGBE_DCA
+	/* configure DCA */
+	if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
+		ixgbe_setup_dca(adapter);
+#endif /* CONFIG_IXGBE_DCA */
+
 #ifdef IXGBE_FCOE
 	/* configure FCoE L2 filters, redirection table, and Rx control */
 	ixgbe_configure_fcoe(adapter);
@@ -4802,6 +4825,7 @@
 		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
 
 	adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+	adapter->sfp_poll_time = 0;
 }
 
 /**
@@ -4892,9 +4916,6 @@
 		case ixgbe_mac_82599EB:
 			gpie |= IXGBE_SDP0_GPIEN_8259X;
 			break;
-		case ixgbe_mac_X540:
-			gpie |= IXGBE_EIMS_TS;
-			break;
 		default:
 			break;
 		}
@@ -4904,9 +4925,15 @@
 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
 		gpie |= IXGBE_SDP1_GPIEN(hw);
 
-	if (hw->mac.type == ixgbe_mac_82599EB) {
-		gpie |= IXGBE_SDP1_GPIEN_8259X;
-		gpie |= IXGBE_SDP2_GPIEN_8259X;
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+		gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
+		break;
+	case ixgbe_mac_X550EM_x:
+		gpie |= IXGBE_SDP0_GPIEN_X540;
+		break;
+	default:
+		break;
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -5229,11 +5256,6 @@
 
 	ixgbe_clean_all_tx_rings(adapter);
 	ixgbe_clean_all_rx_rings(adapter);
-
-#ifdef CONFIG_IXGBE_DCA
-	/* since we reset the hardware DCA settings were cleared */
-	ixgbe_setup_dca(adapter);
-#endif
 }
 
 /**
@@ -6701,10 +6723,16 @@
 	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
 		return;
 
+	if (adapter->sfp_poll_time &&
+	    time_after(adapter->sfp_poll_time, jiffies))
+		return; /* If not yet time to poll for SFP */
+
 	/* someone else is in init, wait until next service event */
 	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
 		return;
 
+	adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
+
 	err = hw->phy.ops.identify_sfp(hw);
 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
 		goto sfp_out;
@@ -8704,8 +8732,7 @@
 	hw->phy.reset_if_overtemp = true;
 	err = hw->mac.ops.reset_hw(hw);
 	hw->phy.reset_if_overtemp = false;
-	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
-	    hw->mac.type == ixgbe_mac_82598EB) {
+	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
 		err = 0;
 	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
 		e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
@@ -9017,7 +9044,8 @@
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
 		dca_remove_requester(&pdev->dev);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+				IXGBE_DCA_CTRL_DCA_DISABLE);
 	}
 
 #endif
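
The ixgbe_phy.c diff that follows applies one pattern repeatedly: each I2C accessor body becomes a *_generic_int() helper with a lock parameter, and thin locked and _unlocked wrappers call it, so a caller that already holds the semaphore can reuse the same body without deadlocking. A sketch of that shape with hypothetical names; the real helpers also retry and clear the I2C bus on failure.

#include <stdio.h>

static int read_byte_int(unsigned char offset, unsigned char *data, int lock)
{
	if (lock)
		printf("acquire semaphore\n");
	*data = 0x42;			/* stand-in for the actual transfer */
	if (lock)
		printf("release semaphore\n");
	return 0;
}

static int read_byte(unsigned char offset, unsigned char *data)
{
	return read_byte_int(offset, data, 1);
}

static int read_byte_unlocked(unsigned char offset, unsigned char *data)
{
	return read_byte_int(offset, data, 0);
}

int main(void)
{
	unsigned char v;

	read_byte(0xA0, &v);		/* normal caller takes the lock */
	read_byte_unlocked(0xA0, &v);	/* caller already holds it */
	return 0;
}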
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 597d0b1..fb8673d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -100,16 +100,17 @@
 }
 
 /**
- *  ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ *  ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
  *  @hw: pointer to the hardware structure
  *  @addr: I2C bus address to read from
  *  @reg: I2C device register to read from
  *  @val: pointer to location to receive read value
+ *  @lock: true if the semaphore should be taken and released
  *
  *  Returns an error code on error.
- **/
-s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
-				    u16 reg, u16 *val)
+ */
+static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+					       u16 reg, u16 *val, bool lock)
 {
 	u32 swfw_mask = hw->phy.phy_semaphore_mask;
 	int max_retry = 10;
@@ -124,7 +125,7 @@
 	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
 	csum = ~csum;
 	do {
-		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
 			return IXGBE_ERR_SWFW_SYNC;
 		ixgbe_i2c_start(hw);
 		/* Device Address and write indication */
@@ -157,13 +158,15 @@
 		if (ixgbe_clock_out_i2c_bit(hw, false))
 			goto fail;
 		ixgbe_i2c_stop(hw);
-		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+		if (lock)
+			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
 		*val = (high_bits << 8) | low_bits;
 		return 0;
 
 fail:
 		ixgbe_i2c_bus_clear(hw);
-		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+		if (lock)
+			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
 		retry++;
 		if (retry < max_retry)
 			hw_dbg(hw, "I2C byte read combined error - Retry.\n");
@@ -175,17 +178,49 @@
 }
 
 /**
- *  ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ *  ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ *  @hw: pointer to the hardware structure
+ *  @addr: I2C bus address to read from
+ *  @reg: I2C device register to read from
+ *  @val: pointer to location to receive read value
+ *
+ *  Returns an error code on error.
+ */
+s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+				    u16 reg, u16 *val)
+{
+	return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
+}
+
+/**
+ *  ixgbe_read_i2c_combined_generic_unlocked - Unlocked I2C read combined
+ *  @hw: pointer to the hardware structure
+ *  @addr: I2C bus address to read from
+ *  @reg: I2C device register to read from
+ *  @val: pointer to location to receive read value
+ *
+ *  Returns an error code on error.
+ */
+s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
+					     u16 reg, u16 *val)
+{
+	return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false);
+}
+
+/**
+ *  ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
  *  @hw: pointer to the hardware structure
  *  @addr: I2C bus address to write to
  *  @reg: I2C device register to write to
  *  @val: value to write
+ *  @lock: true if the semaphore should be taken and released
  *
  *  Returns an error code on error.
- **/
-s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
-				     u8 addr, u16 reg, u16 val)
+ */
+static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+						u16 reg, u16 val, bool lock)
 {
+	u32 swfw_mask = hw->phy.phy_semaphore_mask;
 	int max_retry = 1;
 	int retry = 0;
 	u8 reg_high;
@@ -197,6 +232,8 @@
 	csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
 	csum = ~csum;
 	do {
+		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+			return IXGBE_ERR_SWFW_SYNC;
 		ixgbe_i2c_start(hw);
 		/* Device Address and write indication */
 		if (ixgbe_out_i2c_byte_ack(hw, addr))
@@ -217,10 +254,14 @@
 		if (ixgbe_out_i2c_byte_ack(hw, csum))
 			goto fail;
 		ixgbe_i2c_stop(hw);
+		if (lock)
+			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
 		return 0;
 
 fail:
 		ixgbe_i2c_bus_clear(hw);
+		if (lock)
+			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
 		retry++;
 		if (retry < max_retry)
 			hw_dbg(hw, "I2C byte write combined error - Retry.\n");
@@ -232,6 +273,36 @@
 }
 
 /**
+ *  ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ *  @hw: pointer to the hardware structure
+ *  @addr: I2C bus address to write to
+ *  @reg: I2C device register to write to
+ *  @val: value to write
+ *
+ *  Returns an error code on error.
+ */
+s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+				     u8 addr, u16 reg, u16 val)
+{
+	return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
+}
+
+/**
+ *  ixgbe_write_i2c_combined_generic_unlocked - Unlocked I2C write combined
+ *  @hw: pointer to the hardware structure
+ *  @addr: I2C bus address to write to
+ *  @reg: I2C device register to write to
+ *  @val: value to write
+ *
+ *  Returns an error code on error.
+ */
+s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
+					      u8 addr, u16 reg, u16 val)
+{
+	return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
+}
+
+/**
  *  ixgbe_identify_phy_generic - Get physical layer module
  *  @hw: pointer to hardware structure
  *
@@ -1100,6 +1171,9 @@
 		return IXGBE_ERR_SFP_NOT_PRESENT;
 	}
 
+	/* LAN ID is needed for sfp_type determination */
+	hw->mac.ops.set_lan_id(hw);
+
 	status = hw->phy.ops.read_i2c_eeprom(hw,
 					     IXGBE_SFF_IDENTIFIER,
 					     &identifier);
@@ -1107,9 +1181,6 @@
 	if (status)
 		goto err_read_i2c_eeprom;
 
-	/* LAN ID is needed for sfp_type determination */
-	hw->mac.ops.set_lan_id(hw);
-
 	if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
 		hw->phy.type = ixgbe_phy_sfp_unsupported;
 		return IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1159,7 +1230,7 @@
 			hw->phy.sfp_type = ixgbe_sfp_type_lr;
 		else
 			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
-	} else if (hw->mac.type == ixgbe_mac_82599EB) {
+	} else {
 		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
 			if (hw->bus.lan_id == 0)
 				hw->phy.sfp_type =
@@ -1660,26 +1731,46 @@
 }
 
 /**
- *  ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * ixgbe_is_sfp_probe - Returns true if SFP is being detected
+ * @hw: pointer to hardware structure
+ * @offset: eeprom offset to be read
+ * @addr: I2C address to be read
+ */
+static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
+{
+	if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
+	    offset == IXGBE_SFF_IDENTIFIER &&
+	    hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+		return true;
+	return false;
+}
+
+/**
+ *  ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset to read
  *  @data: value read
+ *  @lock: true if the semaphore should be taken and released
  *
  *  Performs byte read operation to SFP module's EEPROM over I2C interface at
  *  a specified device address.
- **/
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-				u8 dev_addr, u8 *data)
+ */
+static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+					   u8 dev_addr, u8 *data, bool lock)
 {
 	s32 status;
 	u32 max_retry = 10;
 	u32 retry = 0;
 	u32 swfw_mask = hw->phy.phy_semaphore_mask;
 	bool nack = true;
+
+	if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
+		max_retry = IXGBE_SFP_DETECT_RETRIES;
+
 	*data = 0;
 
 	do {
-		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
 			return IXGBE_ERR_SWFW_SYNC;
 
 		ixgbe_i2c_start(hw);
@@ -1721,12 +1812,16 @@
 			goto fail;
 
 		ixgbe_i2c_stop(hw);
-		break;
+		if (lock)
+			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+		return 0;
 
 fail:
 		ixgbe_i2c_bus_clear(hw);
-		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-		msleep(100);
+		if (lock) {
+			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+			msleep(100);
+		}
 		retry++;
 		if (retry < max_retry)
 			hw_dbg(hw, "I2C byte read error - Retrying.\n");
@@ -1735,29 +1830,60 @@
 
 	} while (retry < max_retry);
 
-	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-
 	return status;
 }
 
 /**
- *  ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ *  ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to read
+ *  @data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address.
+ */
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+				u8 dev_addr, u8 *data)
+{
+	return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+					       data, true);
+}
+
+/**
+ *  ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to read
+ *  @data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address.
+ */
+s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+					 u8 dev_addr, u8 *data)
+{
+	return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+					       data, false);
+}
+
+/**
+ *  ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset to write
  *  @data: value to write
+ *  @lock: true if the semaphore should be taken and released
  *
  *  Performs byte write operation to SFP module's EEPROM over I2C interface at
  *  a specified device address.
- **/
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-				 u8 dev_addr, u8 data)
+ */
+static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+					    u8 dev_addr, u8 data, bool lock)
 {
 	s32 status;
 	u32 max_retry = 1;
 	u32 retry = 0;
 	u32 swfw_mask = hw->phy.phy_semaphore_mask;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+	if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
 		return IXGBE_ERR_SWFW_SYNC;
 
 	do {
@@ -1788,7 +1914,9 @@
 			goto fail;
 
 		ixgbe_i2c_stop(hw);
-		break;
+		if (lock)
+			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+		return 0;
 
 fail:
 		ixgbe_i2c_bus_clear(hw);
@@ -1799,21 +1927,57 @@
 			hw_dbg(hw, "I2C byte write error.\n");
 	} while (retry < max_retry);
 
-	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+	if (lock)
+		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
 
 	return status;
 }
 
 /**
+ *  ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to write
+ *  @dev_addr: device address
+ *  @data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address.
+ */
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+				 u8 dev_addr, u8 data)
+{
+	return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+						data, true);
+}
+
+/**
+ *  ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to write
+ *  @dev_addr: device address
+ *  @data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address, without taking and releasing the SWFW
+ *  semaphore (the caller must already hold it).
+ */
+s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+					  u8 dev_addr, u8 data)
+{
+	return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+						data, false);
+}
+
+/**
  *  ixgbe_i2c_start - Sets I2C start condition
  *  @hw: pointer to hardware structure
  *
  *  Sets I2C start condition (High -> Low on SDA while SCL is High)
+ *  Sets bit-bang mode on X550 hardware.
  **/
 static void ixgbe_i2c_start(struct ixgbe_hw *hw)
 {
 	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 
+	i2cctl |= IXGBE_I2C_BB_EN(hw);
+
 	/* Start condition must begin with data and clock high */
 	ixgbe_set_i2c_data(hw, &i2cctl, 1);
 	ixgbe_raise_i2c_clk(hw, &i2cctl);
@@ -1838,10 +2002,15 @@
  *  @hw: pointer to hardware structure
  *
  *  Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ *  Disables bit-bang mode and negates data output enable on X550
+ *  hardware.
  **/
 static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
 {
 	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
+	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
+	u32 bb_en_bit = IXGBE_I2C_BB_EN(hw);
 
 	/* Stop condition must begin with data low and clock high */
 	ixgbe_set_i2c_data(hw, &i2cctl, 0);
@@ -1854,6 +2023,13 @@
 
 	/* bus free time between stop and start (4.7us)*/
 	udelay(IXGBE_I2C_T_BUF);
+
+	if (bb_en_bit || data_oe_bit || clk_oe_bit) {
+		i2cctl &= ~bb_en_bit;
+		i2cctl |= data_oe_bit | clk_oe_bit;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
+		IXGBE_WRITE_FLUSH(hw);
+	}
 }
 
 /**
@@ -1868,6 +2044,7 @@
 	s32 i;
 	bool bit = false;
 
+	*data = 0;
 	for (i = 7; i >= 0; i--) {
 		ixgbe_clock_in_i2c_bit(hw, &bit);
 		*data |= bit << i;
@@ -1901,6 +2078,7 @@
 	/* Release SDA line (set high) */
 	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 	i2cctl |= IXGBE_I2C_DATA_OUT(hw);
+	i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw);
 	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
 	IXGBE_WRITE_FLUSH(hw);
 
@@ -1915,15 +2093,21 @@
  **/
 static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
 {
+	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
 	s32 status = 0;
 	u32 i = 0;
 	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 	u32 timeout = 10;
 	bool ack = true;
 
+	if (data_oe_bit) {
+		i2cctl |= IXGBE_I2C_DATA_OUT(hw);
+		i2cctl |= data_oe_bit;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
+		IXGBE_WRITE_FLUSH(hw);
+	}
 	ixgbe_raise_i2c_clk(hw, &i2cctl);
 
-
 	/* Minimum high period of clock is 4us */
 	udelay(IXGBE_I2C_T_HIGH);
 
@@ -1961,7 +2145,14 @@
 static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
 {
 	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
 
+	if (data_oe_bit) {
+		i2cctl |= IXGBE_I2C_DATA_OUT(hw);
+		i2cctl |= data_oe_bit;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
+		IXGBE_WRITE_FLUSH(hw);
+	}
 	ixgbe_raise_i2c_clk(hw, &i2cctl);
 
 	/* Minimum high period of clock is 4us */
@@ -2016,13 +2207,20 @@
  *  @i2cctl: Current value of I2CCTL register
  *
  *  Raises the I2C clock line '0'->'1'
+ *  Negates the I2C clock output enable on X550 hardware.
  **/
 static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
 {
+	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
 	u32 i = 0;
 	u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
 	u32 i2cctl_r = 0;
 
+	if (clk_oe_bit) {
+		*i2cctl |= clk_oe_bit;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
+	}
+
 	for (i = 0; i < timeout; i++) {
 		*i2cctl |= IXGBE_I2C_CLK_OUT(hw);
 		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
@@ -2042,11 +2240,13 @@
  *  @i2cctl: Current value of I2CCTL register
  *
  *  Lowers the I2C clock line '1'->'0'
+ *  Asserts the I2C clock output enable on X550 hardware.
  **/
 static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
 {
 
 	*i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
+	*i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw);
 
 	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
 	IXGBE_WRITE_FLUSH(hw);
@@ -2062,13 +2262,17 @@
  *  @data: I2C data value (0 or 1) to set
  *
  *  Sets the I2C data bit
+ *  Asserts the I2C data output enable on X550 hardware.
  **/
 static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
 {
+	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
+
 	if (data)
 		*i2cctl |= IXGBE_I2C_DATA_OUT(hw);
 	else
 		*i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
+	*i2cctl &= ~data_oe_bit;
 
 	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
 	IXGBE_WRITE_FLUSH(hw);
@@ -2076,6 +2280,14 @@
 	/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
 	udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
 
+	if (!data)	/* Can't verify data in this case */
+		return 0;
+	if (data_oe_bit) {
+		*i2cctl |= data_oe_bit;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
 	/* Verify data was set correctly */
 	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 	if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
@@ -2092,9 +2304,19 @@
  *  @i2cctl: Current value of I2CCTL register
  *
  *  Returns the I2C data bit value
+ *  Negates the I2C data output enable on X550 hardware.
  **/
 static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
 {
+	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
+
+	if (data_oe_bit) {
+		*i2cctl |= data_oe_bit;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
+		IXGBE_WRITE_FLUSH(hw);
+		udelay(IXGBE_I2C_T_FALL);
+	}
+
 	if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
 		return true;
 	return false;
@@ -2109,10 +2331,11 @@
  **/
 static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
 {
-	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+	u32 i2cctl;
 	u32 i;
 
 	ixgbe_i2c_start(hw);
+	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
 
 	ixgbe_set_i2c_data(hw, &i2cctl, 1);
 
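
The ixgbe_phy.c changes above split each bit-banged I2C byte accessor into an internal worker that takes a lock flag, plus two thin wrappers, so that code which already holds the SWFW semaphore (for example the new CS4227 port-expander helpers) can reuse the same transfer path without trying to take the semaphore twice. A minimal userspace sketch of that wrapper pattern follows; the bus type, mutex, and function names are invented for illustration and are not the driver's API.

/*
 * Illustrative sketch (not the driver's code) of the locked/unlocked
 * accessor split: one worker takes a "lock" flag, two thin wrappers
 * select whether the semaphore is handled internally.
 */
#include <stdbool.h>
#include <pthread.h>

struct bus {
	pthread_mutex_t sem;		/* stands in for the SWFW semaphore */
	unsigned char regs[256];
};

static int do_read(struct bus *bus, unsigned char reg,
		   unsigned char *val, bool lock)
{
	if (lock)
		pthread_mutex_lock(&bus->sem);

	*val = bus->regs[reg];		/* the real code bit-bangs I2C here */

	if (lock)
		pthread_mutex_unlock(&bus->sem);
	return 0;
}

/* Normal entry point: takes and releases the lock itself. */
int bus_read(struct bus *bus, unsigned char reg, unsigned char *val)
{
	return do_read(bus, reg, val, true);
}

/* For callers that already hold the lock across several accesses. */
int bus_read_unlocked(struct bus *bus, unsigned char reg, unsigned char *val)
{
	return do_read(bus, reg, val, false);
}
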
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index e45988c..5abd66c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -66,6 +66,9 @@
 #define IXGBE_SFF_1GBASET_CAPABLE		0x8
 #define IXGBE_SFF_10GBASESR_CAPABLE		0x10
 #define IXGBE_SFF_10GBASELR_CAPABLE		0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK		0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G		0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G		0x0
 #define IXGBE_SFF_ADDRESSING_MODE		0x4
 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE		0x1
 #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE		0x8
@@ -78,9 +81,29 @@
 #define IXGBE_I2C_EEPROM_STATUS_FAIL		0x2
 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS	0x3
 #define IXGBE_CS4227				0xBE    /* CS4227 address */
-#define IXGBE_CS4227_SPARE24_LSB		0x12B0  /* Reg to program EDC */
+#define IXGBE_CS4227_SCRATCH			2
+#define IXGBE_CS4227_RESET_PENDING		0x1357
+#define IXGBE_CS4227_RESET_COMPLETE		0x5AA5
+#define IXGBE_CS4227_RETRIES			15
+#define IXGBE_CS4227_EFUSE_STATUS		0x0181
+#define IXGBE_CS4227_LINE_SPARE22_MSB		0x12AD	/* Reg to set speed */
+#define IXGBE_CS4227_LINE_SPARE24_LSB		0x12B0	/* Reg to set EDC */
+#define IXGBE_CS4227_HOST_SPARE22_MSB		0x1AAD	/* Reg to set speed */
+#define IXGBE_CS4227_HOST_SPARE24_LSB		0x1AB0	/* Reg to program EDC */
+#define IXGBE_CS4227_EEPROM_STATUS		0x5001
+#define IXGBE_CS4227_EEPROM_LOAD_OK		0x0001
+#define IXGBE_CS4227_SPEED_1G			0x8000
+#define IXGBE_CS4227_SPEED_10G			0
 #define IXGBE_CS4227_EDC_MODE_CX1		0x0002
 #define IXGBE_CS4227_EDC_MODE_SR		0x0004
+#define IXGBE_CS4227_EDC_MODE_DIAG		0x0008
+#define IXGBE_CS4227_RESET_HOLD			500	/* microseconds */
+#define IXGBE_CS4227_RESET_DELAY		500	/* milliseconds */
+#define IXGBE_CS4227_CHECK_DELAY		30	/* milliseconds */
+#define IXGBE_PE				0xE0	/* Port expander addr */
+#define IXGBE_PE_OUTPUT				1	/* Output reg offset */
+#define IXGBE_PE_CONFIG				3	/* Config reg offset */
+#define IXGBE_PE_BIT1				(1 << 1)
 
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE                  0x400
@@ -109,6 +132,8 @@
 #define IXGBE_I2C_T_SU_STO  4
 #define IXGBE_I2C_T_BUF     5
 
+#define IXGBE_SFP_DETECT_RETRIES	2
+
 #define IXGBE_TN_LASI_STATUS_REG        0x9005
 #define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
 
@@ -154,8 +179,12 @@
 s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
 				u8 dev_addr, u8 *data);
+s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+					 u8 dev_addr, u8 *data);
 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
 				 u8 dev_addr, u8 data);
+s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+					  u8 dev_addr, u8 data);
 s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
 				  u8 *eeprom_data);
 s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
@@ -164,6 +193,10 @@
 				   u8 eeprom_data);
 s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
 				    u16 reg, u16 *val);
+s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
+					     u16 reg, u16 *val);
 s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
 				     u16 reg, u16 val);
+s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
+					      u16 reg, u16 val);
 #endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6368919..939c90c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -402,6 +402,7 @@
 #define IXGBE_FDIRSIP4M 0x0EE40
 #define IXGBE_FDIRTCPM  0x0EE44
 #define IXGBE_FDIRUDPM  0x0EE48
+#define IXGBE_FDIRSCTPM	0x0EE78
 #define IXGBE_FDIRIP6M  0x0EE74
 #define IXGBE_FDIRM     0x0EE70
 
@@ -1192,6 +1193,7 @@
 /* RDRXCTL Bit Masks */
 #define IXGBE_RDRXCTL_RDMTS_1_2     0x00000000 /* Rx Desc Min Threshold Size */
 #define IXGBE_RDRXCTL_CRCSTRIP      0x00000002 /* CRC Strip */
+#define IXGBE_RDRXCTL_PSP           0x00000004 /* Pad small packet */
 #define IXGBE_RDRXCTL_MVMEN         0x00000020
 #define IXGBE_RDRXCTL_DMAIDONE      0x00000008 /* DMA init cycle done */
 #define IXGBE_RDRXCTL_AGGDIS        0x00010000 /* Aggregation disable */
@@ -1948,6 +1950,7 @@
 #define IXGBE_GSSR_SW_MNG_SM		0x0400
 #define IXGBE_GSSR_SHARED_I2C_SM	0x1806 /* Wait for both phys & I2Cs */
 #define IXGBE_GSSR_I2C_MASK		0x1800
+#define IXGBE_GSSR_NVM_PHY_MASK		0xF
 
 /* FW Status register bitmask */
 #define IXGBE_FWSTS_FWRI    0x00000200 /* Firmware Reset Indication */
@@ -3255,9 +3258,11 @@
 	void (*flap_tx_laser)(struct ixgbe_hw *);
 	void (*stop_link_on_d3)(struct ixgbe_hw *);
 	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+	s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
 	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
 	s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
 				     bool *);
+	void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed);
 
 	/* Packet Buffer Manipulation */
 	void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
@@ -3328,6 +3333,10 @@
 	s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
 	s32 (*enter_lplu)(struct ixgbe_hw *);
 	s32 (*handle_lasi)(struct ixgbe_hw *hw);
+	s32 (*read_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+					  u16 *value);
+	s32 (*write_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+					   u16 value);
 };
 
 struct ixgbe_eeprom_info {
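
The ixgbe_type.h additions above introduce two MAC hooks, setup_mac_link and set_rate_select_speed, that a generic multispeed-fiber setup_link routine can call per attempted speed, plus unlocked variants of the combined I2C PHY ops. The sketch below only illustrates a plausible call order for the two MAC hooks under invented names; it is not the ixgbe implementation.

/*
 * Hypothetical sketch of a multispeed-fiber link bring-up built on the two
 * new hooks; the real ixgbe helper differs, this only shows the call order.
 */
#include <stdbool.h>

enum speed { SPEED_1G = 1, SPEED_10G = 10 };

struct mac_ops {
	void (*set_rate_select_speed)(void *hw, enum speed speed);
	int  (*setup_mac_link)(void *hw, enum speed speed, bool wait);
	bool (*link_up)(void *hw);
};

int setup_link_multispeed(void *hw, const struct mac_ops *ops)
{
	static const enum speed try_order[] = { SPEED_10G, SPEED_1G };
	unsigned int i;

	for (i = 0; i < 2; i++) {
		/* Program the SFP rate-select registers first ... */
		ops->set_rate_select_speed(hw, try_order[i]);
		/* ... then configure the MAC/PHY for that speed. */
		if (ops->setup_mac_link(hw, try_order[i], false))
			continue;
		if (ops->link_up(hw))
			return 0;
	}
	return -1;	/* no speed negotiated */
}
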
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 4e75843..c1d4584 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -567,19 +567,25 @@
  **/
 s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
 {
-	u32 swfw_sync;
-	u32 swmask = mask;
-	u32 fwmask = mask << 5;
-	u32 hwmask = 0;
+	u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
+	u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
+	u32 fwmask = swmask << 5;
 	u32 timeout = 200;
+	u32 hwmask = 0;
+	u32 swfw_sync;
 	u32 i;
 
-	if (swmask == IXGBE_GSSR_EEP_SM)
+	if (swmask & IXGBE_GSSR_EEP_SM)
 		hwmask = IXGBE_GSSR_FLASH_SM;
 
+	/* SW only mask does not have FW bit pair */
+	if (mask & IXGBE_GSSR_SW_MNG_SM)
+		swmask |= IXGBE_GSSR_SW_MNG_SM;
+
+	swmask |= swi2c_mask;
+	fwmask |= swi2c_mask << 2;
 	for (i = 0; i < timeout; i++) {
-		/*
-		 * SW NVM semaphore bit is used for access to all
+		/* SW NVM semaphore bit is used for access to all
 		 * SW_FW_SYNC bits (not just NVM)
 		 */
 		if (ixgbe_get_swfw_sync_semaphore(hw))
@@ -590,39 +596,56 @@
 			swfw_sync |= swmask;
 			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
 			ixgbe_release_swfw_sync_semaphore(hw);
-			break;
-		} else {
-			/*
-			 * Firmware currently using resource (fwmask),
-			 * hardware currently using resource (hwmask),
-			 * or other software thread currently using
-			 * resource (swmask)
-			 */
-			ixgbe_release_swfw_sync_semaphore(hw);
-			usleep_range(5000, 10000);
+			usleep_range(5000, 6000);
+			return 0;
 		}
+		/* Firmware currently using resource (fwmask), hardware
+		 * currently using resource (hwmask), or other software
+		 * thread currently using resource (swmask)
+		 */
+		ixgbe_release_swfw_sync_semaphore(hw);
+		usleep_range(5000, 10000);
 	}
 
-	/*
-	 * If the resource is not released by the FW/HW the SW can assume that
-	 * the FW/HW malfunctions. In that case the SW should sets the
-	 * SW bit(s) of the requested resource(s) while ignoring the
-	 * corresponding FW/HW bits in the SW_FW_SYNC register.
+	/* Failed to get SW only semaphore */
+	if (swmask == IXGBE_GSSR_SW_MNG_SM) {
+		hw_dbg(hw, "Failed to get SW only semaphore\n");
+		return IXGBE_ERR_SWFW_SYNC;
+	}
+
+	/* If the resource is not released by the FW/HW the SW can assume that
+	 * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
+	 * of the requested resource(s) while ignoring the corresponding FW/HW
+	 * bits in the SW_FW_SYNC register.
 	 */
-	if (i >= timeout) {
-		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
-		if (swfw_sync & (fwmask | hwmask)) {
-			if (ixgbe_get_swfw_sync_semaphore(hw))
-				return IXGBE_ERR_SWFW_SYNC;
-
-			swfw_sync |= swmask;
-			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
-			ixgbe_release_swfw_sync_semaphore(hw);
-		}
+	if (ixgbe_get_swfw_sync_semaphore(hw))
+		return IXGBE_ERR_SWFW_SYNC;
+	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
+	if (swfw_sync & (fwmask | hwmask)) {
+		swfw_sync |= swmask;
+		IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
+		ixgbe_release_swfw_sync_semaphore(hw);
+		usleep_range(5000, 6000);
+		return 0;
 	}
+	/* If the resource is not released by other SW the SW can assume that
+	 * the other SW malfunctions. In that case the SW should clear all SW
+	 * flags that it does not own and then repeat the whole process once
+	 * again.
+	 */
+	if (swfw_sync & swmask) {
+		u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
+			    IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
 
-	usleep_range(5000, 10000);
-	return 0;
+		if (swi2c_mask)
+			rmask |= IXGBE_GSSR_I2C_MASK;
+		ixgbe_release_swfw_sync_X540(hw, rmask);
+		ixgbe_release_swfw_sync_semaphore(hw);
+		return IXGBE_ERR_SWFW_SYNC;
+	}
+	ixgbe_release_swfw_sync_semaphore(hw);
+
+	return IXGBE_ERR_SWFW_SYNC;
 }
 
 /**
@@ -635,9 +658,11 @@
  **/
 void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
 {
+	u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM);
 	u32 swfw_sync;
-	u32 swmask = mask;
 
+	if (mask & IXGBE_GSSR_I2C_MASK)
+		swmask |= mask & IXGBE_GSSR_I2C_MASK;
 	ixgbe_get_swfw_sync_semaphore(hw);
 
 	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
@@ -645,7 +670,7 @@
 	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
 
 	ixgbe_release_swfw_sync_semaphore(hw);
-	usleep_range(5000, 10000);
+	usleep_range(5000, 6000);
 }
 
 /**
@@ -686,6 +711,11 @@
 		usleep_range(50, 100);
 	}
 
+	/* Release semaphores and return error if SW NVM semaphore
+	 * was not granted because we do not have access to the EEPROM
+	 */
+	hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n");
+	ixgbe_release_swfw_sync_semaphore(hw);
 	return IXGBE_ERR_EEPROM;
 }
 
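
The rewritten ixgbe_acquire_swfw_sync_X540() above follows a common shared-semaphore pattern: poll for the resource bits under a register semaphore, and only after the timeout decide whether firmware/hardware left the bits stuck (force-take the SW bits) or another software agent holds them (back off and fail). A condensed sketch of that decision flow, with the delays and the I2C/SW-only special cases omitted and all names simplified:

/*
 * Condensed sketch of the acquire flow above; not the driver's code.
 */
#include <stdbool.h>

struct sync_regs {
	unsigned int swfw_sync;		/* models the SW_FW_SYNC register */
};

static bool take_reg_semaphore(struct sync_regs *r) { (void)r; return true; }
static void drop_reg_semaphore(struct sync_regs *r) { (void)r; }

int acquire_swfw(struct sync_regs *r, unsigned int swmask,
		 unsigned int fwmask, unsigned int hwmask)
{
	unsigned int i;

	for (i = 0; i < 200; i++) {
		if (!take_reg_semaphore(r))
			return -1;
		if (!(r->swfw_sync & (swmask | fwmask | hwmask))) {
			r->swfw_sync |= swmask;	/* resource is free: claim it */
			drop_reg_semaphore(r);
			return 0;
		}
		drop_reg_semaphore(r);	/* busy: release and retry */
	}

	/* Timed out: if FW/HW never released the bits, assume they hang and
	 * claim the SW bits anyway; if another SW agent holds them, give up.
	 */
	if (!take_reg_semaphore(r))
		return -1;
	if (r->swfw_sync & (fwmask | hwmask)) {
		r->swfw_sync |= swmask;
		drop_reg_semaphore(r);
		return 0;
	}
	drop_reg_semaphore(r);
	return -1;
}
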
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 9fe9445..ed7b289 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -56,6 +56,283 @@
 	IXGBE_WRITE_FLUSH(hw);
 }
 
+/**
+ * ixgbe_read_cs4227 - Read CS4227 register
+ * @hw: pointer to hardware structure
+ * @reg: register number to read
+ * @value: pointer to receive value read
+ *
+ * Returns status code
+ */
+static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
+{
+	return hw->phy.ops.read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg,
+						      value);
+}
+
+/**
+ * ixgbe_write_cs4227 - Write CS4227 register
+ * @hw: pointer to hardware structure
+ * @reg: register number to write
+ * @value: value to write to register
+ *
+ * Returns status code
+ */
+static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
+{
+	return hw->phy.ops.write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg,
+						       value);
+}
+
+/**
+ * ixgbe_check_cs4227_reg - Perform diag on a CS4227 register
+ * @hw: pointer to hardware structure
+ * @reg: the register to check
+ *
+ * Performs a diagnostic on a register in the CS4227 chip. Returns an error
+ * if it is not operating correctly.
+ * This function assumes that the caller has acquired the proper semaphore.
+ */
+static s32 ixgbe_check_cs4227_reg(struct ixgbe_hw *hw, u16 reg)
+{
+	s32 status;
+	u32 retry;
+	u16 reg_val;
+
+	reg_val = (IXGBE_CS4227_EDC_MODE_DIAG << 1) | 1;
+	status = ixgbe_write_cs4227(hw, reg, reg_val);
+	if (status)
+		return status;
+	for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+		msleep(IXGBE_CS4227_CHECK_DELAY);
+		reg_val = 0xFFFF;
+		ixgbe_read_cs4227(hw, reg, &reg_val);
+		if (!reg_val)
+			break;
+	}
+	if (reg_val) {
+		hw_err(hw, "CS4227 reg 0x%04X failed diagnostic\n", reg);
+		return IXGBE_ERR_PHY;
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_get_cs4227_status - Return CS4227 status
+ * @hw: pointer to hardware structure
+ *
+ * Performs a diagnostic on the CS4227 chip. Returns an error if it is
+ * not operating correctly.
+ * This function assumes that the caller has acquired the proper semaphore.
+ */
+static s32 ixgbe_get_cs4227_status(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u16 value = 0;
+
+	/* Exit if the diagnostic has already been performed. */
+	status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
+	if (status)
+		return status;
+	if (value == IXGBE_CS4227_RESET_COMPLETE)
+		return 0;
+
+	/* Check port 0. */
+	status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB);
+	if (status)
+		return status;
+
+	status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB);
+	if (status)
+		return status;
+
+	/* Check port 1. */
+	status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB +
+					(1 << 12));
+	if (status)
+		return status;
+
+	return ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB +
+				      (1 << 12));
+}
+
+/**
+ * ixgbe_read_pe - Read register from port expander
+ * @hw: pointer to hardware structure
+ * @reg: register number to read
+ * @value: pointer to receive read value
+ *
+ * Returns status code
+ */
+static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
+{
+	s32 status;
+
+	status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value);
+	if (status)
+		hw_err(hw, "port expander access failed with %d\n", status);
+	return status;
+}
+
+/**
+ * ixgbe_write_pe - Write register to port expander
+ * @hw: pointer to hardware structure
+ * @reg: register number to write
+ * @value: value to write
+ *
+ * Returns status code
+ */
+static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
+{
+	s32 status;
+
+	status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE,
+						       value);
+	if (status)
+		hw_err(hw, "port expander access failed with %d\n", status);
+	return status;
+}
+
+/**
+ * ixgbe_reset_cs4227 - Reset CS4227 using port expander
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u32 retry;
+	u16 value;
+	u8 reg;
+
+	/* Trigger hard reset. */
+	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+	if (status)
+		return status;
+	reg |= IXGBE_PE_BIT1;
+	status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+	if (status)
+		return status;
+
+	status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
+	if (status)
+		return status;
+	reg &= ~IXGBE_PE_BIT1;
+	status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
+	if (status)
+		return status;
+
+	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+	if (status)
+		return status;
+	reg &= ~IXGBE_PE_BIT1;
+	status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+	if (status)
+		return status;
+
+	usleep_range(IXGBE_CS4227_RESET_HOLD, IXGBE_CS4227_RESET_HOLD + 100);
+
+	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+	if (status)
+		return status;
+	reg |= IXGBE_PE_BIT1;
+	status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+	if (status)
+		return status;
+
+	/* Wait for the reset to complete. */
+	msleep(IXGBE_CS4227_RESET_DELAY);
+	for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+		status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
+					   &value);
+		if (!status && value == IXGBE_CS4227_EEPROM_LOAD_OK)
+			break;
+		msleep(IXGBE_CS4227_CHECK_DELAY);
+	}
+	if (retry == IXGBE_CS4227_RETRIES) {
+		hw_err(hw, "CS4227 reset did not complete\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
+	if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
+		hw_err(hw, "CS4227 EEPROM did not load successfully\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_check_cs4227 - Check CS4227 and reset as needed
+ * @hw: pointer to hardware structure
+ */
+static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
+{
+	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+	s32 status;
+	u16 value;
+	u8 retry;
+
+	for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+		status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+		if (status) {
+			hw_err(hw, "semaphore failed with %d\n", status);
+			msleep(IXGBE_CS4227_CHECK_DELAY);
+			continue;
+		}
+
+		/* Get status of reset flow. */
+		status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
+		if (!status && value == IXGBE_CS4227_RESET_COMPLETE)
+			goto out;
+
+		if (status || value != IXGBE_CS4227_RESET_PENDING)
+			break;
+
+		/* Reset is pending. Wait and check again. */
+		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+		msleep(IXGBE_CS4227_CHECK_DELAY);
+	}
+
+	/* Reset the CS4227. */
+	status = ixgbe_reset_cs4227(hw);
+	if (status) {
+		hw_err(hw, "CS4227 reset failed: %d", status);
+		goto out;
+	}
+
+	/* Reset takes so long, temporarily release semaphore in case the
+	 * other driver instance is waiting for the reset indication.
+	 */
+	ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
+			   IXGBE_CS4227_RESET_PENDING);
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+	usleep_range(10000, 12000);
+	status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+	if (status) {
+		hw_err(hw, "semaphore failed with %d", status);
+		return;
+	}
+
+	/* Is the CS4227 working correctly? */
+	status = ixgbe_get_cs4227_status(hw);
+	if (status) {
+		hw_err(hw, "CS4227 status failed: %d", status);
+		goto out;
+	}
+
+	/* Record completion for next time. */
+	status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
+				    IXGBE_CS4227_RESET_COMPLETE);
+
+out:
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+	msleep(hw->eeprom.semaphore_delay);
+}
+
 /** ixgbe_identify_phy_x550em - Get PHY type based on device id
  *  @hw: pointer to hardware structure
  *
@@ -68,7 +345,7 @@
 		/* set up for CS4227 usage */
 		hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
 		ixgbe_setup_mux_ctl(hw);
-
+		ixgbe_check_cs4227(hw);
 		return ixgbe_identify_module_generic(hw);
 	case IXGBE_DEV_ID_X550EM_X_KX4:
 		hw->phy.type = ixgbe_phy_x550em_kx4;
@@ -910,6 +1187,96 @@
 }
 
 /**
+ *  ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
+ *  @hw: pointer to hardware structure
+ *  @linear: true if SFP module is linear
+ */
+static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+{
+	switch (hw->phy.sfp_type) {
+	case ixgbe_sfp_type_not_present:
+		return IXGBE_ERR_SFP_NOT_PRESENT;
+	case ixgbe_sfp_type_da_cu_core0:
+	case ixgbe_sfp_type_da_cu_core1:
+		*linear = true;
+		break;
+	case ixgbe_sfp_type_srlr_core0:
+	case ixgbe_sfp_type_srlr_core1:
+	case ixgbe_sfp_type_da_act_lmt_core0:
+	case ixgbe_sfp_type_da_act_lmt_core1:
+	case ixgbe_sfp_type_1g_sx_core0:
+	case ixgbe_sfp_type_1g_sx_core1:
+	case ixgbe_sfp_type_1g_lx_core0:
+	case ixgbe_sfp_type_1g_lx_core1:
+		*linear = false;
+		break;
+	case ixgbe_sfp_type_unknown:
+	case ixgbe_sfp_type_1g_cu_core0:
+	case ixgbe_sfp_type_1g_cu_core1:
+	default:
+		return IXGBE_ERR_SFP_NOT_SUPPORTED;
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
+ *  @hw: pointer to hardware structure
+ *  @speed: link speed to set up
+ *  @autoneg_wait_to_complete: unused in this implementation
+ *
+ *  Configures the external PHY and the integrated KR PHY for SFP support.
+ */
+static s32
+ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
+				ixgbe_link_speed speed,
+				__always_unused bool autoneg_wait_to_complete)
+{
+	s32 status;
+	u16 slice, value;
+	bool setup_linear = false;
+
+	/* Check if SFP module is supported and linear */
+	status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+	/* If no SFP module is present, return success: there is no reason to
+	 * configure the CS4227, and an SFP-not-present error is not accepted
+	 * in the setup MAC link flow.
+	 */
+	if (status == IXGBE_ERR_SFP_NOT_PRESENT)
+		return 0;
+
+	if (status)
+		return status;
+
+	/* Configure CS4227 LINE side to 10G SR. */
+	slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12);
+	value = IXGBE_CS4227_SPEED_10G;
+	status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice,
+						  value);
+
+	/* Configure CS4227 for HOST connection rate then type. */
+	slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12);
+	value = speed & IXGBE_LINK_SPEED_10GB_FULL ?
+		IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G;
+	status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice,
+						  value);
+
+	slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12);
+	if (setup_linear)
+		value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
+	else
+		value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
+	status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice,
+						  value);
+
+	/* If internal link mode is XFI, then setup XFI internal link. */
+	if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))
+		status = ixgbe_setup_ixfi_x550em(hw, &speed);
+
+	return status;
+}
+
+/**
  * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
  * @hw: pointer to hardware structure
  * @speed: new link speed
@@ -1003,6 +1370,10 @@
 		mac->ops.disable_tx_laser = NULL;
 		mac->ops.enable_tx_laser = NULL;
 		mac->ops.flap_tx_laser = NULL;
+		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
+		mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em;
+		mac->ops.set_rate_select_speed =
+					ixgbe_set_soft_rate_select_speed;
 		break;
 	case ixgbe_media_type_copper:
 		mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
@@ -1018,53 +1389,18 @@
  */
 static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
 {
-	bool setup_linear;
-	u16 reg_slice, edc_mode;
-	s32 ret_val;
+	s32 status;
+	bool linear;
 
-	switch (hw->phy.sfp_type) {
-	case ixgbe_sfp_type_unknown:
-		return 0;
-	case ixgbe_sfp_type_not_present:
-		return IXGBE_ERR_SFP_NOT_PRESENT;
-	case ixgbe_sfp_type_da_cu_core0:
-	case ixgbe_sfp_type_da_cu_core1:
-		setup_linear = true;
-		break;
-	case ixgbe_sfp_type_srlr_core0:
-	case ixgbe_sfp_type_srlr_core1:
-	case ixgbe_sfp_type_da_act_lmt_core0:
-	case ixgbe_sfp_type_da_act_lmt_core1:
-	case ixgbe_sfp_type_1g_sx_core0:
-	case ixgbe_sfp_type_1g_sx_core1:
-		setup_linear = false;
-		break;
-	default:
-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
-	}
+	/* Check if SFP module is supported */
+	status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
+	if (status)
+		return status;
 
 	ixgbe_init_mac_link_ops_X550em(hw);
 	hw->phy.ops.reset = NULL;
 
-	/* The CS4227 slice address is the base address + the port-pair reg
-	 * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0.
-	 */
-	reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
-
-	if (setup_linear)
-		edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
-	else
-		edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
-
-	/* Configure CS4227 for connection type. */
-	ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
-						 edc_mode);
-
-	if (ret_val)
-		ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice,
-							 edc_mode);
-
-	return ret_val;
+	return 0;
 }
 
 /** ixgbe_get_link_capabilities_x550em - Determines link capabilities
@@ -1927,6 +2263,62 @@
 	IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
 }
 
+/**
+ * ixgbe_set_mux - Set mux for port 1 access with CS4227
+ * @hw: pointer to hardware structure
+ * @state: set mux if 1, clear if 0
+ */
+static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
+{
+	u32 esdp;
+
+	if (!hw->bus.lan_id)
+		return;
+	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+	if (state)
+		esdp |= IXGBE_ESDP_SDP1;
+	else
+		esdp &= ~IXGBE_ESDP_SDP1;
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and sets the I2C MUX
+ */
+static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+{
+	s32 status;
+
+	status = ixgbe_acquire_swfw_sync_X540(hw, mask);
+	if (status)
+		return status;
+
+	if (mask & IXGBE_GSSR_I2C_MASK)
+		ixgbe_set_mux(hw, 1);
+
+	return 0;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and clears the I2C MUX
+ */
+static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+{
+	if (mask & IXGBE_GSSR_I2C_MASK)
+		ixgbe_set_mux(hw, 0);
+
+	ixgbe_release_swfw_sync_X540(hw, mask);
+}
+
 #define X550_COMMON_MAC \
 	.init_hw			= &ixgbe_init_hw_generic, \
 	.start_hw			= &ixgbe_start_hw_X540, \
@@ -1964,8 +2356,6 @@
 				&ixgbe_set_source_address_pruning_X550, \
 	.set_ethertype_anti_spoofing	= \
 				&ixgbe_set_ethertype_anti_spoofing_X550, \
-	.acquire_swfw_sync		= &ixgbe_acquire_swfw_sync_X540, \
-	.release_swfw_sync		= &ixgbe_release_swfw_sync_X540, \
 	.disable_rx_buff		= &ixgbe_disable_rx_buff_generic, \
 	.enable_rx_buff			= &ixgbe_enable_rx_buff_generic, \
 	.get_thermal_sensor_data	= NULL, \
@@ -1985,6 +2375,8 @@
 	.get_link_capabilities	= &ixgbe_get_copper_link_capabilities_generic,
 	.get_bus_info		= &ixgbe_get_bus_info_generic,
 	.setup_sfp		= NULL,
+	.acquire_swfw_sync	= &ixgbe_acquire_swfw_sync_X540,
+	.release_swfw_sync	= &ixgbe_release_swfw_sync_X540,
 };
 
 static struct ixgbe_mac_operations mac_ops_X550EM_x = {
@@ -1997,7 +2389,8 @@
 	.get_link_capabilities	= &ixgbe_get_link_capabilities_X550em,
 	.get_bus_info		= &ixgbe_get_bus_info_X550em,
 	.setup_sfp		= ixgbe_setup_sfp_modules_X550em,
-
+	.acquire_swfw_sync	= &ixgbe_acquire_swfw_sync_X550em,
+	.release_swfw_sync	= &ixgbe_release_swfw_sync_X550em,
 };
 
 #define X550_COMMON_EEP \
@@ -2039,14 +2432,17 @@
 	X550_COMMON_PHY
 	.init			= NULL,
 	.identify		= &ixgbe_identify_phy_generic,
-	.read_i2c_combined	= &ixgbe_read_i2c_combined_generic,
-	.write_i2c_combined	= &ixgbe_write_i2c_combined_generic,
 };
 
 static struct ixgbe_phy_operations phy_ops_X550EM_x = {
 	X550_COMMON_PHY
 	.init			= &ixgbe_init_phy_ops_X550em,
 	.identify		= &ixgbe_identify_phy_x550em,
+	.read_i2c_combined	= &ixgbe_read_i2c_combined_generic,
+	.write_i2c_combined	= &ixgbe_write_i2c_combined_generic,
+	.read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
+	.write_i2c_combined_unlocked =
+				     &ixgbe_write_i2c_combined_generic_unlocked,
 };
 
 static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
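
Because both ports of an X550EM_x device share one CS4227, ixgbe_check_cs4227() above coordinates the PHY reset through a scratch register: the instance that starts the reset writes a pending code, the peer sees it and waits, and a completion code is recorded once the diagnostic passes. A stripped-down sketch of that handshake; the scratch codes mirror the defines added in this patch, while the structure and helpers are invented:

/*
 * Stripped-down sketch of the shared-PHY reset handshake; retries, the
 * semaphore handling and the real register accessors are omitted.
 */
#include <stdbool.h>

#define RESET_PENDING	0x1357
#define RESET_COMPLETE	0x5AA5

struct shared_phy {
	unsigned short scratch;		/* register both ports can read */
};

static void hard_reset_phy(struct shared_phy *phy) { (void)phy; }
static bool phy_diag_ok(struct shared_phy *phy)    { (void)phy; return true; }

void check_shared_phy(struct shared_phy *phy)
{
	/* Peer already finished: nothing to do. */
	if (phy->scratch == RESET_COMPLETE)
		return;

	/* Peer is mid-reset: the driver would poll and come back later. */
	if (phy->scratch == RESET_PENDING)
		return;

	/* We own the reset: advertise it, do it, then verify and record. */
	phy->scratch = RESET_PENDING;
	hard_reset_phy(phy);
	if (phy_diag_ok(phy))
		phy->scratch = RESET_COMPLETE;
}
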
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 149a0b4..35da2d7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3896,6 +3896,7 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ixgbevf_netpoll,
 #endif
+	.ndo_features_check	= passthru_features_check,
 };
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index fe2299a..dd6fe94 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -32,6 +32,7 @@
 #include <linux/of_address.h>
 #include <linux/phy.h>
 #include <linux/clk.h>
+#include <linux/cpu.h>
 
 /* Registers */
 #define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
@@ -285,23 +286,34 @@
 	u64	tx_bytes;
 };
 
+struct mvneta_pcpu_port {
+	/* Pointer to the shared port */
+	struct mvneta_port	*pp;
+
+	/* Pointer to the CPU-local NAPI struct */
+	struct napi_struct	napi;
+
+	/* Cause of the previous interrupt */
+	u32			cause_rx_tx;
+};
+
 struct mvneta_port {
+	struct mvneta_pcpu_port __percpu	*ports;
+	struct mvneta_pcpu_stats __percpu	*stats;
+
 	int pkt_size;
 	unsigned int frag_size;
 	void __iomem *base;
 	struct mvneta_rx_queue *rxqs;
 	struct mvneta_tx_queue *txqs;
 	struct net_device *dev;
-
-	u32 cause_rx_tx;
-	struct napi_struct napi;
+	struct notifier_block cpu_notifier;
 
 	/* Core clock */
 	struct clk *clk;
 	u8 mcast_count[256];
 	u16 tx_ring_size;
 	u16 rx_ring_size;
-	struct mvneta_pcpu_stats *stats;
 
 	struct mii_bus *mii_bus;
 	struct phy_device *phy_dev;
@@ -468,7 +480,7 @@
 /* The hardware supports eight (8) rx queues, but we are only allowing
  * the first one to be used. Therefore, let's just allocate one queue.
  */
-static int rxq_number = 1;
+static int rxq_number = 8;
 static int txq_number = 8;
 
 static int rxq_def;
@@ -756,14 +768,7 @@
 	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
 
 	/* Enable all initialized RXQs. */
-	q_map = 0;
-	for (queue = 0; queue < rxq_number; queue++) {
-		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
-		if (rxq->descs != NULL)
-			q_map |= (1 << queue);
-	}
-
-	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
+	mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
 }
 
 /* Stop the Ethernet port activity */
@@ -949,7 +954,7 @@
 	/* Set CPU queue access map - all CPUs have access to all RX
 	 * queues and to all TX queues
 	 */
-	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
+	for_each_present_cpu(cpu)
 		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
 			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
 			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
@@ -1426,17 +1431,6 @@
 	return MVNETA_TX_L4_CSUM_NOT;
 }
 
-/* Returns rx queue pointer (find last set bit) according to causeRxTx
- * value
- */
-static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
-						u32 cause)
-{
-	int queue = fls(cause >> 8) - 1;
-
-	return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
-}
-
 /* Drop packets received by the RXQ and free buffers */
 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 				 struct mvneta_rx_queue *rxq)
@@ -1461,6 +1455,7 @@
 static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 		     struct mvneta_rx_queue *rxq)
 {
+	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
 	struct net_device *dev = pp->dev;
 	int rx_done;
 	u32 rcvd_pkts = 0;
@@ -1479,6 +1474,7 @@
 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
 		struct sk_buff *skb;
 		unsigned char *data;
+		dma_addr_t phys_addr;
 		u32 rx_status;
 		int rx_bytes, err;
 
@@ -1486,6 +1482,7 @@
 		rx_status = rx_desc->status;
 		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
 		data = (unsigned char *)rx_desc->buf_cookie;
+		phys_addr = rx_desc->buf_phys_addr;
 
 		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
 		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
@@ -1513,7 +1510,7 @@
 
 			skb->protocol = eth_type_trans(skb, dev);
 			mvneta_rx_csum(pp, rx_status, skb);
-			napi_gro_receive(&pp->napi, skb);
+			napi_gro_receive(&port->napi, skb);
 
 			rcvd_pkts++;
 			rcvd_bytes += rx_bytes;
@@ -1534,7 +1531,7 @@
 		if (!skb)
 			goto err_drop_frame;
 
-		dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
+		dma_unmap_single(dev->dev.parent, phys_addr,
 				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
 
 		rcvd_pkts++;
@@ -1548,7 +1545,7 @@
 
 		mvneta_rx_csum(pp, rx_status, skb);
 
-		napi_gro_receive(&pp->napi, skb);
+		napi_gro_receive(&port->napi, skb);
 	}
 
 	if (rcvd_pkts) {
@@ -2059,12 +2056,10 @@
 /* Interrupt handling - the callback for request_irq() */
 static irqreturn_t mvneta_isr(int irq, void *dev_id)
 {
-	struct mvneta_port *pp = (struct mvneta_port *)dev_id;
+	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
 
-	/* Mask all interrupts */
-	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
-
-	napi_schedule(&pp->napi);
+	disable_percpu_irq(port->pp->dev->irq);
+	napi_schedule(&port->napi);
 
 	return IRQ_HANDLED;
 }
@@ -2102,11 +2097,11 @@
 {
 	int rx_done = 0;
 	u32 cause_rx_tx;
-	unsigned long flags;
 	struct mvneta_port *pp = netdev_priv(napi->dev);
+	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
 
 	if (!netif_running(pp->dev)) {
-		napi_complete(napi);
+		napi_complete(&port->napi);
 		return rx_done;
 	}
 
@@ -2133,47 +2128,17 @@
 	/* For the case where the last mvneta_poll did not process all
 	 * RX packets
 	 */
-	cause_rx_tx |= pp->cause_rx_tx;
-	if (rxq_number > 1) {
-		while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
-			int count;
-			struct mvneta_rx_queue *rxq;
-			/* get rx queue number from cause_rx_tx */
-			rxq = mvneta_rx_policy(pp, cause_rx_tx);
-			if (!rxq)
-				break;
-
-			/* process the packet in that rx queue */
-			count = mvneta_rx(pp, budget, rxq);
-			rx_done += count;
-			budget -= count;
-			if (budget > 0) {
-				/* set off the rx bit of the
-				 * corresponding bit in the cause rx
-				 * tx register, so that next iteration
-				 * will find the next rx queue where
-				 * packets are received on
-				 */
-				cause_rx_tx &= ~((1 << rxq->id) << 8);
-			}
-		}
-	} else {
-		rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
-		budget -= rx_done;
-	}
+	cause_rx_tx |= port->cause_rx_tx;
+	rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
+	budget -= rx_done;
 
 	if (budget > 0) {
 		cause_rx_tx = 0;
-		napi_complete(napi);
-		local_irq_save(flags);
-		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-			    MVNETA_RX_INTR_MASK(rxq_number) |
-			    MVNETA_TX_INTR_MASK(txq_number) |
-			    MVNETA_MISCINTR_INTR_MASK);
-		local_irq_restore(flags);
+		napi_complete(&port->napi);
+		enable_percpu_irq(pp->dev->irq, 0);
 	}
 
-	pp->cause_rx_tx = cause_rx_tx;
+	port->cause_rx_tx = cause_rx_tx;
 	return rx_done;
 }
 
@@ -2377,26 +2342,19 @@
 /* Cleanup all Rx queues */
 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
-	int queue;
-
-	for (queue = 0; queue < rxq_number; queue++)
-		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
+	mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]);
 }
 
 
 /* Init all Rx queues */
 static int mvneta_setup_rxqs(struct mvneta_port *pp)
 {
-	int queue;
-
-	for (queue = 0; queue < rxq_number; queue++) {
-		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
-		if (err) {
-			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
-				   __func__, queue);
-			mvneta_cleanup_rxqs(pp);
-			return err;
-		}
+	int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]);
+	if (err) {
+		netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+			   __func__, rxq_def);
+		mvneta_cleanup_rxqs(pp);
+		return err;
 	}
 
 	return 0;
@@ -2422,6 +2380,8 @@
 
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
+	unsigned int cpu;
+
 	mvneta_max_rx_size_set(pp, pp->pkt_size);
 	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
 
@@ -2429,7 +2389,11 @@
 	mvneta_port_enable(pp);
 
 	/* Enable polling on the port */
-	napi_enable(&pp->napi);
+	for_each_present_cpu(cpu) {
+		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+		napi_enable(&port->napi);
+	}
 
 	/* Unmask interrupts */
 	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
@@ -2447,9 +2411,15 @@
 
 static void mvneta_stop_dev(struct mvneta_port *pp)
 {
+	unsigned int cpu;
+
 	phy_stop(pp->phy_dev);
 
-	napi_disable(&pp->napi);
+	for_each_present_cpu(cpu) {
+		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+		napi_disable(&port->napi);
+	}
 
 	netif_carrier_off(pp->dev);
 
@@ -2689,6 +2659,125 @@
 	pp->phy_dev = NULL;
 }
 
+static void mvneta_percpu_enable(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
+}
+
+static void mvneta_percpu_disable(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	disable_percpu_irq(pp->dev->irq);
+}
+
+static void mvneta_percpu_elect(struct mvneta_port *pp)
+{
+	int online_cpu_idx, cpu, i = 0;
+
+	online_cpu_idx = rxq_def % num_online_cpus();
+
+	for_each_online_cpu(cpu) {
+		if (i == online_cpu_idx)
+			/* Enable per-CPU interrupt on the one CPU we
+			 * just elected
+			 */
+			smp_call_function_single(cpu, mvneta_percpu_enable,
+						pp, true);
+		else
+			/* Disable the per-CPU interrupt on all the other CPUs */
+			smp_call_function_single(cpu, mvneta_percpu_disable,
+						pp, true);
+		i++;
+	}
+}
+
+static int mvneta_percpu_notifier(struct notifier_block *nfb,
+				  unsigned long action, void *hcpu)
+{
+	struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
+					      cpu_notifier);
+	int cpu = (unsigned long)hcpu, other_cpu;
+	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		netif_tx_stop_all_queues(pp->dev);
+
+		/* We have to synchronise on the napi of each CPU
+		 * except the one just being woken up
+		 */
+		for_each_online_cpu(other_cpu) {
+			if (other_cpu != cpu) {
+				struct mvneta_pcpu_port *other_port =
+					per_cpu_ptr(pp->ports, other_cpu);
+
+				napi_synchronize(&other_port->napi);
+			}
+		}
+
+		/* Mask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+		napi_enable(&port->napi);
+
+		/* Enable per-CPU interrupt on the one CPU we care
+		 * about.
+		 */
+		mvneta_percpu_elect(pp);
+
+		/* Unmask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+			MVNETA_RX_INTR_MASK(rxq_number) |
+			MVNETA_TX_INTR_MASK(txq_number) |
+			MVNETA_MISCINTR_INTR_MASK);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+			MVNETA_CAUSE_PHY_STATUS_CHANGE |
+			MVNETA_CAUSE_LINK_CHANGE |
+			MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		netif_tx_start_all_queues(pp->dev);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		netif_tx_stop_all_queues(pp->dev);
+		/* Mask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+
+		napi_synchronize(&port->napi);
+		napi_disable(&port->napi);
+		/* Disable per-CPU interrupts on the CPU that is
+		 * brought down.
+		 */
+		smp_call_function_single(cpu, mvneta_percpu_disable,
+					 pp, true);
+
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		/* Check if a new CPU must be elected now that this one is down */
+		mvneta_percpu_elect(pp);
+		/* Unmask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+			MVNETA_RX_INTR_MASK(rxq_number) |
+			MVNETA_TX_INTR_MASK(txq_number) |
+			MVNETA_MISCINTR_INTR_MASK);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+			MVNETA_CAUSE_PHY_STATUS_CHANGE |
+			MVNETA_CAUSE_LINK_CHANGE |
+			MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		netif_tx_start_all_queues(pp->dev);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
 static int mvneta_open(struct net_device *dev)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
@@ -2707,13 +2796,29 @@
 		goto err_cleanup_rxqs;
 
 	/* Connect to port interrupt line */
-	ret = request_irq(pp->dev->irq, mvneta_isr, 0,
-			  MVNETA_DRIVER_NAME, pp);
+	ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
+				 MVNETA_DRIVER_NAME, pp->ports);
 	if (ret) {
 		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
 		goto err_cleanup_txqs;
 	}
 
+	/* Even though the documentation says that request_percpu_irq
+	 * doesn't enable the interrupts automatically, it actually
+	 * does so on the local CPU.
+	 *
+	 * Make sure it's disabled.
+	 */
+	mvneta_percpu_disable(pp);
+
+	/* Elect a CPU to handle our RX queue interrupt */
+	mvneta_percpu_elect(pp);
+
+	/* Register a CPU notifier to handle the case where our CPU
+	 * might be taken offline.
+	 */
+	register_cpu_notifier(&pp->cpu_notifier);
+
 	/* In default link is down */
 	netif_carrier_off(pp->dev);
 
@@ -2728,7 +2833,7 @@
 	return 0;
 
 err_free_irq:
-	free_irq(pp->dev->irq, pp);
+	free_percpu_irq(pp->dev->irq, pp->ports);
 err_cleanup_txqs:
 	mvneta_cleanup_txqs(pp);
 err_cleanup_rxqs:
@@ -2740,10 +2845,14 @@
 static int mvneta_stop(struct net_device *dev)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
+	int cpu;
 
 	mvneta_stop_dev(pp);
 	mvneta_mdio_remove(pp);
-	free_irq(dev->irq, pp);
+	unregister_cpu_notifier(&pp->cpu_notifier);
+	for_each_present_cpu(cpu)
+		smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
+	free_percpu_irq(dev->irq, pp->ports);
 	mvneta_cleanup_rxqs(pp);
 	mvneta_cleanup_txqs(pp);
 
@@ -3030,14 +3139,7 @@
 	const char *managed;
 	int phy_mode;
 	int err;
-
-	/* Our multiqueue support is not complete, so for now, only
-	 * allow the usage of the first RX queue
-	 */
-	if (rxq_def != 0) {
-		dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
-		return -EINVAL;
-	}
+	int cpu;
 
 	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
 	if (!dev)
@@ -3089,6 +3191,7 @@
 	err = of_property_read_string(dn, "managed", &managed);
 	pp->use_inband_status = (err == 0 &&
 				 strcmp(managed, "in-band-status") == 0);
+	pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
 
 	pp->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pp->clk)) {
@@ -3105,11 +3208,18 @@
 		goto err_clk;
 	}
 
+	/* Alloc per-cpu port structure */
+	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
+	if (!pp->ports) {
+		err = -ENOMEM;
+		goto err_clk;
+	}
+
 	/* Alloc per-cpu stats */
 	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
 	if (!pp->stats) {
 		err = -ENOMEM;
-		goto err_clk;
+		goto err_free_ports;
 	}
 
 	dt_mac_addr = of_get_mac_address(dn);
@@ -3150,7 +3260,12 @@
 	if (dram_target_info)
 		mvneta_conf_mbus_windows(pp, dram_target_info);
 
-	netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+	for_each_present_cpu(cpu) {
+		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+		netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+		port->pp = pp;
+	}
 
 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 	dev->hw_features |= dev->features;
@@ -3173,12 +3288,16 @@
 		struct phy_device *phy = of_phy_find_device(dn);
 
 		mvneta_fixed_link_update(pp, phy);
+
+		put_device(&phy->dev);
 	}
 
 	return 0;
 
 err_free_stats:
 	free_percpu(pp->stats);
+err_free_ports:
+	free_percpu(pp->ports);
 err_clk:
 	clk_disable_unprepare(pp->clk);
 err_put_phy_node:
@@ -3198,6 +3317,7 @@
 
 	unregister_netdev(dev);
 	clk_disable_unprepare(pp->clk);
+	free_percpu(pp->ports);
 	free_percpu(pp->stats);
 	irq_dispose_mapping(dev->irq);
 	of_node_put(pp->phy_node);
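
The mvneta rework above replaces the single shared NAPI context with a per-CPU one: a mvneta_pcpu_port is allocated with alloc_percpu(), the interrupt is requested with request_percpu_irq(), and the handler only masks the local per-CPU interrupt and schedules the local NAPI instance, which unmasks it again when the budget is not exhausted. A minimal sketch of that pattern, using only kernel helpers already visible in this patch and placeholder "foo" names:

/*
 * Minimal sketch of the per-CPU NAPI pattern; "foo" names are placeholders,
 * not mvneta code, and RX processing itself is elided.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>

struct foo_port;

struct foo_pcpu {
	struct foo_port		*pp;	/* back-pointer to the shared port */
	struct napi_struct	napi;	/* one NAPI context per CPU */
};

struct foo_port {
	struct net_device		*dev;
	struct foo_pcpu __percpu	*ports;	/* from alloc_percpu() */
};

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	/* request_percpu_irq() hands us this CPU's foo_pcpu pointer */
	struct foo_pcpu *port = dev_id;

	disable_percpu_irq(port->pp->dev->irq);	/* mask only on this CPU */
	napi_schedule(&port->napi);
	return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_port *pp = netdev_priv(napi->dev);
	int done = 0;

	/* ... process up to "budget" RX descriptors here ... */

	if (done < budget) {
		napi_complete(napi);
		enable_percpu_irq(pp->dev->irq, 0);	/* unmask local IRQ */
	}
	return done;
}
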
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index d9f4498..5606a04 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4819,6 +4819,18 @@
 		memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
 			      ETH_ALEN);
 
+	/* if the address is invalid, use a random value */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		struct sockaddr sa = { AF_UNSPEC };
+
+		netdev_warn(dev,
+			    "Invalid MAC address, defaulting to random\n");
+		eth_hw_addr_random(dev);
+		memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
+		if (sky2_set_mac_address(dev, &sa))
+			netdev_warn(dev, "Failed to set MAC address.\n");
+	}
+
 	return dev;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4726122..597d892 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -573,10 +573,8 @@
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_dev *dev = mdev->dev;
-	struct mlx4_mac_entry *entry;
 	int index = 0;
 	int err = 0;
-	u64 reg_id = 0;
 	int *qpn = &priv->base_qpn;
 	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
 
@@ -600,44 +598,11 @@
 	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
 	if (err) {
 		en_err(priv, "Failed to reserve qp for mac registration\n");
-		goto qp_err;
+		mlx4_unregister_mac(dev, priv->port, mac);
+		return err;
 	}
 
-	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
-	if (err)
-		goto steer_err;
-
-	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
-				       &priv->tunnel_reg_id);
-	if (err)
-		goto tunnel_err;
-
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry) {
-		err = -ENOMEM;
-		goto alloc_err;
-	}
-	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
-	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
-	entry->reg_id = reg_id;
-
-	hlist_add_head_rcu(&entry->hlist,
-			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
-
 	return 0;
-
-alloc_err:
-	if (priv->tunnel_reg_id)
-		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
-tunnel_err:
-	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
-
-steer_err:
-	mlx4_qp_release_range(dev, *qpn, 1);
-
-qp_err:
-	mlx4_unregister_mac(dev, priv->port, mac);
-	return err;
 }
 
 static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
@@ -645,39 +610,13 @@
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_dev *dev = mdev->dev;
 	int qpn = priv->base_qpn;
-	u64 mac;
 
 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
-		mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
 		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
 		       priv->dev->dev_addr);
 		mlx4_unregister_mac(dev, priv->port, mac);
 	} else {
-		struct mlx4_mac_entry *entry;
-		struct hlist_node *tmp;
-		struct hlist_head *bucket;
-		unsigned int i;
-
-		for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
-			bucket = &priv->mac_hash[i];
-			hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
-				mac = mlx4_mac_to_u64(entry->mac);
-				en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
-				       entry->mac);
-				mlx4_en_uc_steer_release(priv, entry->mac,
-							 qpn, entry->reg_id);
-
-				mlx4_unregister_mac(dev, priv->port, mac);
-				hlist_del_rcu(&entry->hlist);
-				kfree_rcu(entry, rcu);
-			}
-		}
-
-		if (priv->tunnel_reg_id) {
-			mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
-			priv->tunnel_reg_id = 0;
-		}
-
 		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
 		       priv->port, qpn);
 		mlx4_qp_release_range(dev, qpn, 1);
@@ -1283,6 +1222,75 @@
 }
 #endif
 
+static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+	u64 reg_id;
+	int err = 0;
+	int *qpn = &priv->base_qpn;
+	struct mlx4_mac_entry *entry;
+
+	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
+	if (err)
+		return err;
+
+	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+				       &priv->tunnel_reg_id);
+	if (err)
+		goto tunnel_err;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		err = -ENOMEM;
+		goto alloc_err;
+	}
+
+	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
+	entry->reg_id = reg_id;
+	hlist_add_head_rcu(&entry->hlist,
+			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
+
+	return 0;
+
+alloc_err:
+	if (priv->tunnel_reg_id)
+		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+
+tunnel_err:
+	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
+	return err;
+}
+
+static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+	u64 mac;
+	unsigned int i;
+	int qpn = priv->base_qpn;
+	struct hlist_head *bucket;
+	struct hlist_node *tmp;
+	struct mlx4_mac_entry *entry;
+
+	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+		bucket = &priv->mac_hash[i];
+		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
+			mac = mlx4_mac_to_u64(entry->mac);
+			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
+			       entry->mac);
+			mlx4_en_uc_steer_release(priv, entry->mac,
+						 qpn, entry->reg_id);
+
+			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
+			hlist_del_rcu(&entry->hlist);
+			kfree_rcu(entry, rcu);
+		}
+	}
+
+	if (priv->tunnel_reg_id) {
+		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+		priv->tunnel_reg_id = 0;
+	}
+}
+
 static void mlx4_en_tx_timeout(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1684,6 +1692,11 @@
 		goto tx_err;
 	}
 
+	/* Set Unicast and VXLAN steering rules */
+	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
+	    mlx4_en_set_rss_steer_rules(priv))
+		mlx4_warn(mdev, "Failed setting steering rules\n");
+
 	/* Attach rx QP to bradcast address */
 	eth_broadcast_addr(&mc_list[10]);
 	mc_list[5] = priv->port; /* needed for B0 steering support */
@@ -1831,6 +1844,9 @@
 	for (i = 0; i < priv->tx_ring_num; i++)
 		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
 
+	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+		mlx4_en_delete_rss_steer_rules(priv);
+
 	/* Free RSS qps */
 	mlx4_en_release_rss_steer(priv);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 4402a1e..e7a5000 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1047,13 +1047,15 @@
 
 	/* If we used up all the quota - we're probably not done yet... */
 	if (done == budget) {
-		int cpu_curr;
 		const struct cpumask *aff;
+		struct irq_data *idata;
+		int cpu_curr;
 
 		INC_PERF_COUNTER(priv->pstats.napi_quota);
 
 		cpu_curr = smp_processor_id();
-		aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+		idata = irq_desc_get_irq_data(cq->irq_desc);
+		aff = irq_data_get_affinity_mask(idata);
 
 		if (likely(cpumask_test_cpu(cpu_curr, aff)))
 			return budget;
@@ -1268,8 +1270,6 @@
 		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
 		memcpy(rss_context->rss_key, priv->rss_key,
 		       MLX4_EN_RSS_KEY_SIZE);
-		netdev_rss_key_fill(rss_context->rss_key,
-				    MLX4_EN_RSS_KEY_SIZE);
 	} else {
 		en_err(priv, "Unknown RSS hash function requested\n");
 		err = -EINVAL;
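
The en_rx.c hunk above switches to irq_data_get_affinity_mask() while keeping the existing heuristic: when the whole budget was consumed, stay in polling mode only if the current CPU is still in the IRQ's affinity mask, otherwise complete NAPI so processing can migrate with the interrupt. A small sketch of that check, with the surrounding driver state omitted:

/*
 * Sketch of the "keep polling only on an affine CPU" check; the IRQ number
 * is looked up directly here instead of through a stored irq_desc.
 */
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/netdevice.h>
#include <linux/smp.h>

int keep_polling(struct napi_struct *napi, int irq, int done, int budget)
{
	if (done == budget) {
		struct irq_data *idata = irq_get_irq_data(irq);
		const struct cpumask *aff = irq_data_get_affinity_mask(idata);

		/* Still running on a CPU the IRQ is bound to: keep the budget
		 * so the core calls us again without re-arming the interrupt.
		 */
		if (cpumask_test_cpu(smp_processor_id(), aff))
			return budget;
	}
	napi_complete(napi);
	return done;
}
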
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index bd9ea0d..1d4e2e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1184,10 +1184,11 @@
 	if (prot == MLX4_PROT_ETH) {
 		/* manage the steering entry for promisc mode */
 		if (new_entry)
-			new_steering_entry(dev, port, steer, index, qp->qpn);
+			err = new_steering_entry(dev, port, steer,
+						 index, qp->qpn);
 		else
-			existing_steering_entry(dev, port, steer,
-						index, qp->qpn);
+			err = existing_steering_entry(dev, port, steer,
+						      index, qp->qpn);
 	}
 	if (err && link && index != -1) {
 		if (index < dev->caps.num_mgms)
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2026863..3311f35 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -422,15 +422,15 @@
 	u64 qp_mask = 0;
 	int err = 0;
 
+	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+		return -EINVAL;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
 	cmd = (struct mlx4_update_qp_context *)mailbox->buf;
 
-	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
-		return -EINVAL;
-
 	if (attr & MLX4_UPDATE_QP_SMAC) {
 		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
 		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 731423c..ac4b99a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1238,8 +1238,10 @@
 	return 0;
 
 undo:
-	for (--i; i >= base; --i)
+	for (--i; i >= 0; --i) {
 		rb_erase(&res_arr[i]->node, root);
+		list_del_init(&res_arr[i]->list);
+	}
 
 	spin_unlock_irq(mlx4_tlock(dev));
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 75ff58d..c3e54b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -254,6 +254,10 @@
 		pr_debug("\n");
 }
 
+enum {
+	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
+};
+
 const char *mlx5_command_str(int command)
 {
 	switch (command) {
@@ -473,6 +477,7 @@
 	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
 	struct mlx5_cmd_layout *lay;
 	struct semaphore *sem;
+	unsigned long flags;
 
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
@@ -485,6 +490,9 @@
 		}
 	} else {
 		ent->idx = cmd->max_reg_cmds;
+		spin_lock_irqsave(&cmd->alloc_lock, flags);
+		clear_bit(ent->idx, &cmd->bitmask);
+		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 	}
 
 	ent->token = alloc_token(cmd);
@@ -1081,7 +1089,7 @@
 	}
 }
 
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
 	struct mlx5_cmd_work_ent *ent;
@@ -1092,7 +1100,10 @@
 	s64 ds;
 	struct mlx5_cmd_stats *stats;
 	unsigned long flags;
+	unsigned long vector;
 
+	/* there can be at most 32 command queues */
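+	/* vec may also carry MLX5_TRIGGERED_CMD_COMP for driver-triggered completions */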
+	vector = vec & 0xffffffff;
 	for (i = 0; i < (1 << cmd->log_sz); i++) {
 		if (test_bit(i, &vector)) {
 			struct semaphore *sem;
@@ -1110,11 +1121,16 @@
 					ent->ret = verify_signature(ent);
 				else
 					ent->ret = 0;
-				ent->status = ent->lay->status_own >> 1;
+				if (vec & MLX5_TRIGGERED_CMD_COMP)
+					ent->status = MLX5_DRIVER_STATUS_ABORTED;
+				else
+					ent->status = ent->lay->status_own >> 1;
+
 				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
 					      ent->ret, deliv_status_to_str(ent->status), ent->status);
 			}
 			free_ent(cmd, ent->idx);
+
 			if (ent->callback) {
 				ds = ent->ts2 - ent->ts1;
 				if (ent->op < ARRAY_SIZE(cmd->stats)) {
@@ -1136,6 +1152,7 @@
 				mlx5_free_cmd_msg(dev, ent->out);
 				free_msg(dev, ent->in);
 
+				err = err ? err : ent->status;
 				free_cmd(ent);
 				callback(err, context);
 			} else {
@@ -1363,6 +1380,7 @@
 	int err;
 	int i;
 
+	memset(cmd, 0, sizeof(*cmd));
 	cmd_if_rev = cmdif_rev(dev);
 	if (cmd_if_rev != CMD_IF_REV) {
 		dev_err(&dev->pdev->dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 04ab7e4..b51e42d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -242,6 +242,7 @@
 	struct mlx5_cq_table *table = &dev->priv.cq_table;
 	int err;
 
+	memset(table, 0, sizeof(*table));
 	spin_lock_init(&table->lock);
 	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
 	err = mlx5_cq_debugfs_init(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 59874d6..bb801a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1367,13 +1367,13 @@
 
 	err = mlx5e_set_dev_port_mtu(netdev);
 	if (err)
-		return err;
+		goto err_clear_state_opened_flag;
 
 	err = mlx5e_open_channels(priv);
 	if (err) {
 		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
 			   __func__, err);
-		return err;
+		goto err_clear_state_opened_flag;
 	}
 
 	mlx5e_update_carrier(priv);
@@ -1382,6 +1382,10 @@
 	schedule_delayed_work(&priv->update_stats_work, 0);
 
 	return 0;
+
+err_clear_state_opened_flag:
+	clear_bit(MLX5E_STATE_OPENED, &priv->state);
+	return err;
 }
 
 static int mlx5e_open(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index a40b96d..1f01fe8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -346,6 +346,7 @@
 	int inlen;
 
 	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
+	eq->cons_index = 0;
 	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
 	if (err)
 		return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index aa0d5ff..9335e5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -200,25 +200,3 @@
 
 	return err;
 }
-
-int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey)
-{
-	struct mlx5_cmd_query_special_contexts_mbox_in in;
-	struct mlx5_cmd_query_special_contexts_mbox_out out;
-	int err;
-
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		err = mlx5_cmd_status_to_err(&out.hdr);
-
-	*rsvd_lkey = be32_to_cpu(out.resd_lkey);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_query_special_context);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 292d76f..9b81e1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -46,39 +46,27 @@
 enum {
 	MLX5_HEALTH_SYNDR_FW_ERR		= 0x1,
 	MLX5_HEALTH_SYNDR_IRISC_ERR		= 0x7,
+	MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR	= 0x8,
 	MLX5_HEALTH_SYNDR_CRC_ERR		= 0x9,
 	MLX5_HEALTH_SYNDR_FETCH_PCI_ERR		= 0xa,
 	MLX5_HEALTH_SYNDR_HW_FTL_ERR		= 0xb,
 	MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR	= 0xc,
 	MLX5_HEALTH_SYNDR_EQ_ERR		= 0xd,
+	MLX5_HEALTH_SYNDR_EQ_INV		= 0xe,
 	MLX5_HEALTH_SYNDR_FFSER_ERR		= 0xf,
+	MLX5_HEALTH_SYNDR_HIGH_TEMP		= 0x10
 };
 
-static DEFINE_SPINLOCK(health_lock);
-static LIST_HEAD(health_list);
-static struct work_struct health_work;
-
 static void health_care(struct work_struct *work)
 {
-	struct mlx5_core_health *health, *n;
+	struct mlx5_core_health *health;
 	struct mlx5_core_dev *dev;
 	struct mlx5_priv *priv;
-	LIST_HEAD(tlist);
 
-	spin_lock_irq(&health_lock);
-	list_splice_init(&health_list, &tlist);
-
-	spin_unlock_irq(&health_lock);
-
-	list_for_each_entry_safe(health, n, &tlist, list) {
-		priv = container_of(health, struct mlx5_priv, health);
-		dev = container_of(priv, struct mlx5_core_dev, priv);
-		mlx5_core_warn(dev, "handling bad device here\n");
-		/* nothing yet */
-		spin_lock_irq(&health_lock);
-		list_del_init(&health->list);
-		spin_unlock_irq(&health_lock);
-	}
+	health = container_of(work, struct mlx5_core_health, work);
+	priv = container_of(health, struct mlx5_priv, health);
+	dev = container_of(priv, struct mlx5_core_dev, priv);
+	mlx5_core_warn(dev, "handling bad device here\n");
 }
 
 static const char *hsynd_str(u8 synd)
@@ -88,6 +76,8 @@
 		return "firmware internal error";
 	case MLX5_HEALTH_SYNDR_IRISC_ERR:
 		return "irisc not responding";
+	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
+		return "unrecoverable hardware error";
 	case MLX5_HEALTH_SYNDR_CRC_ERR:
 		return "firmware CRC error";
 	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
@@ -98,39 +88,52 @@
 		return "async EQ buffer overrun";
 	case MLX5_HEALTH_SYNDR_EQ_ERR:
 		return "EQ error";
+	case MLX5_HEALTH_SYNDR_EQ_INV:
+		return "Invalid EQ referenced";
 	case MLX5_HEALTH_SYNDR_FFSER_ERR:
 		return "FFSER error";
+	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
+		return "High temperature";
 	default:
 		return "unrecognized error";
 	}
 }
 
-static u16 read_be16(__be16 __iomem *p)
+static u16 get_maj(u32 fw)
 {
-	return swab16(readl((__force u16 __iomem *) p));
+	return fw >> 28;
 }
 
-static u32 read_be32(__be32 __iomem *p)
+static u16 get_min(u32 fw)
 {
-	return swab32(readl((__force u32 __iomem *) p));
+	return fw >> 16 & 0xfff;
+}
+
+static u16 get_sub(u32 fw)
+{
+	return fw & 0xffff;
 }
 
 static void print_health_info(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
 	struct health_buffer __iomem *h = health->health;
+	char fw_str[18];
+	u32 fw;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
-		pr_info("assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i));
+		dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
 
-	pr_info("assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr));
-	pr_info("assert_callra 0x%08x\n", read_be32(&h->assert_callra));
-	pr_info("fw_ver 0x%08x\n", read_be32(&h->fw_ver));
-	pr_info("hw_id 0x%08x\n", read_be32(&h->hw_id));
-	pr_info("irisc_index %d\n", readb(&h->irisc_index));
-	pr_info("synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd)));
-	pr_info("ext_sync 0x%04x\n", read_be16(&h->ext_sync));
+	dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
+	dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+	fw = ioread32be(&h->fw_ver);
+	sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw));
+	dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
+	dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
+	dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
+	dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
+	dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
 }
 
 static void poll_health(unsigned long data)
@@ -150,11 +153,7 @@
 	if (health->miss_counter == MAX_MISSES) {
 		mlx5_core_err(dev, "device's health compromised\n");
 		print_health_info(dev);
-		spin_lock_irq(&health_lock);
-		list_add_tail(&health->list, &health_list);
-		spin_unlock_irq(&health_lock);
-
-		queue_work(mlx5_core_wq, &health_work);
+		queue_work(health->wq, &health->work);
 	} else {
 		get_random_bytes(&next, sizeof(next));
 		next %= HZ;
@@ -167,7 +166,6 @@
 {
 	struct mlx5_core_health *health = &dev->priv.health;
 
-	INIT_LIST_HEAD(&health->list);
 	init_timer(&health->timer);
 	health->health = &dev->iseg->health;
 	health->health_counter = &dev->iseg->health_counter;
@@ -183,18 +181,33 @@
 	struct mlx5_core_health *health = &dev->priv.health;
 
 	del_timer_sync(&health->timer);
-
-	spin_lock_irq(&health_lock);
-	if (!list_empty(&health->list))
-		list_del_init(&health->list);
-	spin_unlock_irq(&health_lock);
 }
 
-void mlx5_health_cleanup(void)
+void mlx5_health_cleanup(struct mlx5_core_dev *dev)
 {
+	struct mlx5_core_health *health = &dev->priv.health;
+
+	destroy_workqueue(health->wq);
 }
 
-void  __init mlx5_health_init(void)
+int mlx5_health_init(struct mlx5_core_dev *dev)
 {
-	INIT_WORK(&health_work, health_care);
+	struct mlx5_core_health *health;
+	char *name;
+
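+	/* each device gets its own health workqueue, named after its PCI device */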
+	health = &dev->priv.health;
+	name = kmalloc(64, GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+
+	strcpy(name, "mlx5_health");
+	strcat(name, dev_name(&dev->pdev->dev));
+	health->wq = create_singlethread_workqueue(name);
+	kfree(name);
+	if (!health->wq)
+		return -ENOMEM;
+
+	INIT_WORK(&health->work, health_care);
+
+	return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 03aabdd..b6edc58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -62,7 +62,6 @@
 module_param_named(prof_sel, prof_sel, int, 0444);
 MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
 
-struct workqueue_struct *mlx5_core_wq;
 static LIST_HEAD(intf_list);
 static LIST_HEAD(dev_list);
 static DEFINE_MUTEX(intf_mutex);
@@ -672,12 +671,126 @@
 		io_mapping_free(dev->priv.bf_mapping);
 }
 
-static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
+static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+	struct mlx5_device_context *dev_ctx;
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
+	if (!dev_ctx)
+		return;
+
+	dev_ctx->intf    = intf;
+	dev_ctx->context = intf->add(dev);
+
+	if (dev_ctx->context) {
+		spin_lock_irq(&priv->ctx_lock);
+		list_add_tail(&dev_ctx->list, &priv->ctx_list);
+		spin_unlock_irq(&priv->ctx_lock);
+	} else {
+		kfree(dev_ctx);
+	}
+}
+
+static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+	struct mlx5_device_context *dev_ctx;
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+		if (dev_ctx->intf == intf) {
+			spin_lock_irq(&priv->ctx_lock);
+			list_del(&dev_ctx->list);
+			spin_unlock_irq(&priv->ctx_lock);
+
+			intf->remove(dev, dev_ctx->context);
+			kfree(dev_ctx);
+			return;
+		}
+}
+
+static int mlx5_register_device(struct mlx5_core_dev *dev)
 {
 	struct mlx5_priv *priv = &dev->priv;
-	int err;
+	struct mlx5_interface *intf;
 
-	dev->pdev = pdev;
+	mutex_lock(&intf_mutex);
+	list_add_tail(&priv->dev_list, &dev_list);
+	list_for_each_entry(intf, &intf_list, list)
+		mlx5_add_device(intf, priv);
+	mutex_unlock(&intf_mutex);
+
+	return 0;
+}
+
+static void mlx5_unregister_device(struct mlx5_core_dev *dev)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_interface *intf;
+
+	mutex_lock(&intf_mutex);
+	list_for_each_entry(intf, &intf_list, list)
+		mlx5_remove_device(intf, priv);
+	list_del(&priv->dev_list);
+	mutex_unlock(&intf_mutex);
+}
+
+int mlx5_register_interface(struct mlx5_interface *intf)
+{
+	struct mlx5_priv *priv;
+
+	if (!intf->add || !intf->remove)
+		return -EINVAL;
+
+	mutex_lock(&intf_mutex);
+	list_add_tail(&intf->list, &intf_list);
+	list_for_each_entry(priv, &dev_list, dev_list)
+		mlx5_add_device(intf, priv);
+	mutex_unlock(&intf_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(mlx5_register_interface);
+
+void mlx5_unregister_interface(struct mlx5_interface *intf)
+{
+	struct mlx5_priv *priv;
+
+	mutex_lock(&intf_mutex);
+	list_for_each_entry(priv, &dev_list, dev_list)
+		mlx5_remove_device(intf, priv);
+	list_del(&intf->list);
+	mutex_unlock(&intf_mutex);
+}
+EXPORT_SYMBOL(mlx5_unregister_interface);
+
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+	struct mlx5_priv *priv = &mdev->priv;
+	struct mlx5_device_context *dev_ctx;
+	unsigned long flags;
+	void *result = NULL;
+
+	spin_lock_irqsave(&priv->ctx_lock, flags);
+
+	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+		if ((dev_ctx->intf->protocol == protocol) &&
+		    dev_ctx->intf->get_dev) {
+			result = dev_ctx->intf->get_dev(dev_ctx->context);
+			break;
+		}
+
+	spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
+static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int err = 0;
+
 	pci_set_drvdata(dev->pdev, dev);
 	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
 	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
@@ -721,13 +834,42 @@
 		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
 		goto err_clr_master;
 	}
+
+	return 0;
+
+err_clr_master:
+	pci_clear_master(dev->pdev);
+	release_bar(dev->pdev);
+err_disable:
+	pci_disable_device(dev->pdev);
+
+err_dbg:
+	debugfs_remove(priv->dbg_root);
+	return err;
+}
+
+static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+	iounmap(dev->iseg);
+	pci_clear_master(dev->pdev);
+	release_bar(dev->pdev);
+	pci_disable_device(dev->pdev);
+	debugfs_remove(priv->dbg_root);
+}
+
+#define MLX5_IB_MOD "mlx5_ib"
+static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int err;
+
 	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
 		 fw_rev_min(dev), fw_rev_sub(dev));
 
 	err = mlx5_cmd_init(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
-		goto err_unmap;
+		return err;
 	}
 
 	mlx5_pagealloc_init(dev);
@@ -842,8 +984,25 @@
 	mlx5_init_srq_table(dev);
 	mlx5_init_mr_table(dev);
 
+	err = mlx5_register_device(dev);
+	if (err) {
+		dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
+		goto err_reg_dev;
+	}
+
+	err = request_module_nowait(MLX5_IB_MOD);
+	if (err)
+		pr_info("failed request module on %s\n", MLX5_IB_MOD);
+
 	return 0;
 
+err_reg_dev:
+	mlx5_cleanup_mr_table(dev);
+	mlx5_cleanup_srq_table(dev);
+	mlx5_cleanup_qp_table(dev);
+	mlx5_cleanup_cq_table(dev);
+	mlx5_irq_clear_affinity_hints(dev);
+
 err_unmap_bf_area:
 	unmap_bf_area(dev);
 
@@ -881,25 +1040,15 @@
 	mlx5_pagealloc_cleanup(dev);
 	mlx5_cmd_cleanup(dev);
 
-err_unmap:
-	iounmap(dev->iseg);
-
-err_clr_master:
-	pci_clear_master(dev->pdev);
-	release_bar(dev->pdev);
-
-err_disable:
-	pci_disable_device(dev->pdev);
-
-err_dbg:
-	debugfs_remove(priv->dbg_root);
 	return err;
 }
 
-static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
+static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 {
-	struct mlx5_priv *priv = &dev->priv;
+	int err;
 
+	mlx5_unregister_device(dev);
+	mlx5_cleanup_mr_table(dev);
 	mlx5_cleanup_srq_table(dev);
 	mlx5_cleanup_qp_table(dev);
 	mlx5_cleanup_cq_table(dev);
@@ -911,139 +1060,23 @@
 	mlx5_eq_cleanup(dev);
 	mlx5_disable_msix(dev);
 	mlx5_stop_health_poll(dev);
-	if (mlx5_cmd_teardown_hca(dev)) {
+	err = mlx5_cmd_teardown_hca(dev);
+	if (err) {
 		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
-		return;
+		goto out;
 	}
 	mlx5_pagealloc_stop(dev);
 	mlx5_reclaim_startup_pages(dev);
 	mlx5_core_disable_hca(dev);
 	mlx5_pagealloc_cleanup(dev);
 	mlx5_cmd_cleanup(dev);
-	iounmap(dev->iseg);
-	pci_clear_master(dev->pdev);
-	release_bar(dev->pdev);
-	pci_disable_device(dev->pdev);
-	debugfs_remove(priv->dbg_root);
+
+out:
+	return err;
 }
 
-static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
-	struct mlx5_device_context *dev_ctx;
-	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
-	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
-	if (!dev_ctx) {
-		pr_warn("mlx5_add_device: alloc context failed\n");
-		return;
-	}
-
-	dev_ctx->intf    = intf;
-	dev_ctx->context = intf->add(dev);
-
-	if (dev_ctx->context) {
-		spin_lock_irq(&priv->ctx_lock);
-		list_add_tail(&dev_ctx->list, &priv->ctx_list);
-		spin_unlock_irq(&priv->ctx_lock);
-	} else {
-		kfree(dev_ctx);
-	}
-}
-
-static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
-	struct mlx5_device_context *dev_ctx;
-	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
-	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
-		if (dev_ctx->intf == intf) {
-			spin_lock_irq(&priv->ctx_lock);
-			list_del(&dev_ctx->list);
-			spin_unlock_irq(&priv->ctx_lock);
-
-			intf->remove(dev, dev_ctx->context);
-			kfree(dev_ctx);
-			return;
-		}
-}
-static int mlx5_register_device(struct mlx5_core_dev *dev)
-{
-	struct mlx5_priv *priv = &dev->priv;
-	struct mlx5_interface *intf;
-
-	mutex_lock(&intf_mutex);
-	list_add_tail(&priv->dev_list, &dev_list);
-	list_for_each_entry(intf, &intf_list, list)
-		mlx5_add_device(intf, priv);
-	mutex_unlock(&intf_mutex);
-
-	return 0;
-}
-static void mlx5_unregister_device(struct mlx5_core_dev *dev)
-{
-	struct mlx5_priv *priv = &dev->priv;
-	struct mlx5_interface *intf;
-
-	mutex_lock(&intf_mutex);
-	list_for_each_entry(intf, &intf_list, list)
-		mlx5_remove_device(intf, priv);
-	list_del(&priv->dev_list);
-	mutex_unlock(&intf_mutex);
-}
-
-int mlx5_register_interface(struct mlx5_interface *intf)
-{
-	struct mlx5_priv *priv;
-
-	if (!intf->add || !intf->remove)
-		return -EINVAL;
-
-	mutex_lock(&intf_mutex);
-	list_add_tail(&intf->list, &intf_list);
-	list_for_each_entry(priv, &dev_list, dev_list)
-		mlx5_add_device(intf, priv);
-	mutex_unlock(&intf_mutex);
-
-	return 0;
-}
-EXPORT_SYMBOL(mlx5_register_interface);
-
-void mlx5_unregister_interface(struct mlx5_interface *intf)
-{
-	struct mlx5_priv *priv;
-
-	mutex_lock(&intf_mutex);
-	list_for_each_entry(priv, &dev_list, dev_list)
-	       mlx5_remove_device(intf, priv);
-	list_del(&intf->list);
-	mutex_unlock(&intf_mutex);
-}
-EXPORT_SYMBOL(mlx5_unregister_interface);
-
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
-{
-	struct mlx5_priv *priv = &mdev->priv;
-	struct mlx5_device_context *dev_ctx;
-	unsigned long flags;
-	void *result = NULL;
-
-	spin_lock_irqsave(&priv->ctx_lock, flags);
-
-	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
-		if ((dev_ctx->intf->protocol == protocol) &&
-		    dev_ctx->intf->get_dev) {
-			result = dev_ctx->intf->get_dev(dev_ctx->context);
-			break;
-		}
-
-	spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
-	return result;
-}
-EXPORT_SYMBOL(mlx5_get_protocol_dev);
-
 static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
-			    unsigned long param)
+		     unsigned long param)
 {
 	struct mlx5_priv *priv = &dev->priv;
 	struct mlx5_device_context *dev_ctx;
@@ -1064,7 +1097,6 @@
 		      void *data);
 };
 
-#define MLX5_IB_MOD "mlx5_ib"
 
 static int init_one(struct pci_dev *pdev,
 		    const struct pci_device_id *id)
@@ -1088,40 +1120,55 @@
 		prof_sel = MLX5_DEFAULT_PROF;
 	}
 	dev->profile = &profile[prof_sel];
+	dev->pdev = pdev;
 	dev->event = mlx5_core_event;
 
 	INIT_LIST_HEAD(&priv->ctx_list);
 	spin_lock_init(&priv->ctx_lock);
-	err = mlx5_dev_init(dev, pdev);
+	err = mlx5_pci_init(dev, priv);
 	if (err) {
-		dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
-		goto out;
+		dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
+		goto clean_dev;
 	}
 
-	err = mlx5_register_device(dev);
+	err = mlx5_health_init(dev);
 	if (err) {
-		dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
-		goto out_init;
+		dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
+		goto close_pci;
 	}
 
-	err = request_module_nowait(MLX5_IB_MOD);
-	if (err)
-		pr_info("failed request module on %s\n", MLX5_IB_MOD);
+	err = mlx5_load_one(dev, priv);
+	if (err) {
+		dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
+		goto clean_health;
+	}
 
 	return 0;
 
-out_init:
-	mlx5_dev_cleanup(dev);
-out:
+clean_health:
+	mlx5_health_cleanup(dev);
+close_pci:
+	mlx5_pci_close(dev, priv);
+clean_dev:
+	pci_set_drvdata(pdev, NULL);
 	kfree(dev);
+
 	return err;
 }
+
 static void remove_one(struct pci_dev *pdev)
 {
 	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+	struct mlx5_priv *priv = &dev->priv;
 
-	mlx5_unregister_device(dev);
-	mlx5_dev_cleanup(dev);
+	if (mlx5_unload_one(dev, priv)) {
+		dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
+		mlx5_health_cleanup(dev);
+		return;
+	}
+	mlx5_health_cleanup(dev);
+	mlx5_pci_close(dev, priv);
+	pci_set_drvdata(pdev, NULL);
 	kfree(dev);
 }
 
@@ -1149,16 +1196,10 @@
 	int err;
 
 	mlx5_register_debugfs();
-	mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
-	if (!mlx5_core_wq) {
-		err = -ENOMEM;
-		goto err_debug;
-	}
-	mlx5_health_init();
 
 	err = pci_register_driver(&mlx5_core_driver);
 	if (err)
-		goto err_health;
+		goto err_debug;
 
 #ifdef CONFIG_MLX5_CORE_EN
 	mlx5e_init();
@@ -1166,9 +1207,6 @@
 
 	return 0;
 
-err_health:
-	mlx5_health_cleanup();
-	destroy_workqueue(mlx5_core_wq);
 err_debug:
 	mlx5_unregister_debugfs();
 	return err;
@@ -1180,8 +1218,6 @@
 	mlx5e_cleanup();
 #endif
 	pci_unregister_driver(&mlx5_core_driver);
-	mlx5_health_cleanup();
-	destroy_workqueue(mlx5_core_wq);
 	mlx5_unregister_debugfs();
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 566a704..30c0be7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -43,25 +43,25 @@
 
 extern int mlx5_core_debug_mask;
 
-#define mlx5_core_dbg(dev, format, ...)					\
-	pr_debug("%s:%s:%d:(pid %d): " format,				\
-		 (dev)->priv.name, __func__, __LINE__, current->pid,	\
+#define mlx5_core_dbg(__dev, format, ...)				\
+	dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format,	\
+		 (__dev)->priv.name, __func__, __LINE__, current->pid,	\
 		 ##__VA_ARGS__)
 
-#define mlx5_core_dbg_mask(dev, mask, format, ...)			\
+#define mlx5_core_dbg_mask(__dev, mask, format, ...)			\
 do {									\
 	if ((mask) & mlx5_core_debug_mask)				\
-		mlx5_core_dbg(dev, format, ##__VA_ARGS__);		\
+		mlx5_core_dbg(__dev, format, ##__VA_ARGS__);		\
 } while (0)
 
-#define mlx5_core_err(dev, format, ...)					\
-	pr_err("%s:%s:%d:(pid %d): " format,				\
-	       (dev)->priv.name, __func__, __LINE__, current->pid,	\
+#define mlx5_core_err(__dev, format, ...)				\
+	dev_err(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format,	\
+	       (__dev)->priv.name, __func__, __LINE__, current->pid,	\
 	       ##__VA_ARGS__)
 
-#define mlx5_core_warn(dev, format, ...)				\
-	pr_warn("%s:%s:%d:(pid %d): " format,				\
-		(dev)->priv.name, __func__, __LINE__, current->pid,	\
+#define mlx5_core_warn(__dev, format, ...)				\
+	dev_warn(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format,	\
+		(__dev)->priv.name, __func__, __LINE__, current->pid,	\
 		##__VA_ARGS__)
 
 enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 1adb300..6fa22b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -40,6 +40,7 @@
 {
 	struct mlx5_mr_table *table = &dev->priv.mr_table;
 
+	memset(table, 0, sizeof(*table));
 	rwlock_init(&table->lock);
 	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 8a64542..76432a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -275,12 +275,36 @@
 
 	return err;
 }
+
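+/* Tell firmware that the requested pages cannot be supplied
+ * (MANAGE_PAGES command with the MLX5_PAGES_CANT_GIVE opmod).
+ */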
+static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
+{
+	struct mlx5_manage_pages_inbox *in;
+	struct mlx5_manage_pages_outbox out;
+	int err;
+
+	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	if (!in)
+		return;
+
+	memset(&out, 0, sizeof(out));
+	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
+	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
+	in->func_id = cpu_to_be16(func_id);
+	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+	if (!err)
+		err = mlx5_cmd_status_to_err(&out.hdr);
+
+	if (err)
+		mlx5_core_warn(dev, "page notify failed\n");
+
+	kfree(in);
+}
+
 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 		      int notify_fail)
 {
 	struct mlx5_manage_pages_inbox *in;
 	struct mlx5_manage_pages_outbox out;
-	struct mlx5_manage_pages_inbox *nin;
 	int inlen;
 	u64 addr;
 	int err;
@@ -289,8 +313,9 @@
 	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
 	in = mlx5_vzalloc(inlen);
 	if (!in) {
+		err = -ENOMEM;
 		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
-		return -ENOMEM;
+		goto out_free;
 	}
 	memset(&out, 0, sizeof(out));
 
@@ -316,43 +341,29 @@
 	if (err) {
 		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
 			       func_id, npages, err);
-		goto out_alloc;
+		goto out_4k;
 	}
 	dev->priv.fw_pages += npages;
 
-	if (out.hdr.status) {
-		err = mlx5_cmd_status_to_err(&out.hdr);
-		if (err) {
-			mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
-				       func_id, npages, out.hdr.status);
-			goto out_alloc;
-		}
+	err = mlx5_cmd_status_to_err(&out.hdr);
+	if (err) {
+		mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
+			       func_id, npages, out.hdr.status);
+		goto out_4k;
 	}
 
 	mlx5_core_dbg(dev, "err %d\n", err);
 
-	goto out_free;
-
-out_alloc:
-	if (notify_fail) {
-		nin = kzalloc(sizeof(*nin), GFP_KERNEL);
-		if (!nin) {
-			mlx5_core_warn(dev, "allocation failed\n");
-			goto out_4k;
-		}
-		memset(&out, 0, sizeof(out));
-		nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
-		nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
-		if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
-			mlx5_core_warn(dev, "page notify failed\n");
-		kfree(nin);
-	}
+	kvfree(in);
+	return 0;
 
 out_4k:
 	for (i--; i >= 0; i--)
 		free_4k(dev, be64_to_cpu(in->pas[i]));
 out_free:
 	kvfree(in);
+	if (notify_fail)
+		page_notify_fail(dev, func_id);
 	return err;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 821caaa..ae30261 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -90,16 +90,13 @@
 {
 	struct mlx5_reg_pcap in;
 	struct mlx5_reg_pcap out;
-	int err;
 
 	memset(&in, 0, sizeof(in));
 	in.caps_127_96 = cpu_to_be32(caps);
 	in.port_num = port_num;
 
-	err = mlx5_core_access_reg(dev, &in, sizeof(in), &out,
-				   sizeof(out), MLX5_REG_PCAP, 0, 1);
-
-	return err;
+	return mlx5_core_access_reg(dev, &in, sizeof(in), &out,
+				    sizeof(out), MLX5_REG_PCAP, 0, 1);
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
 
@@ -107,16 +104,13 @@
 			 int ptys_size, int proto_mask, u8 local_port)
 {
 	u32 in[MLX5_ST_SZ_DW(ptys_reg)];
-	int err;
 
 	memset(in, 0, sizeof(in));
 	MLX5_SET(ptys_reg, in, local_port, local_port);
 	MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
 
-	err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
-				   ptys_size, MLX5_REG_PTYS, 0, 0);
-
-	return err;
+	return mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+				    ptys_size, MLX5_REG_PTYS, 0, 0);
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
 
@@ -199,7 +193,6 @@
 {
 	u32 in[MLX5_ST_SZ_DW(ptys_reg)];
 	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
-	int err;
 
 	memset(in, 0, sizeof(in));
 
@@ -210,9 +203,8 @@
 	else
 		MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
 
-	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
-				   sizeof(out), MLX5_REG_PTYS, 0, 1);
-	return err;
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				    sizeof(out), MLX5_REG_PTYS, 0, 1);
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
 
@@ -250,7 +242,7 @@
 		return err;
 
 	*status = MLX5_GET(paos_reg, out, admin_status);
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
 
@@ -308,15 +300,12 @@
 				int pvlc_size,  u8 local_port)
 {
 	u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
-	int err;
 
 	memset(in, 0, sizeof(in));
 	MLX5_SET(ptys_reg, in, local_port, local_port);
 
-	err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
-				   pvlc_size, MLX5_REG_PVLC, 0, 0);
-
-	return err;
+	return mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
+				    pvlc_size, MLX5_REG_PVLC, 0, 0);
 }
 
 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
@@ -339,16 +328,14 @@
 {
 	u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
 	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
-	int err;
 
 	memset(in, 0, sizeof(in));
 	MLX5_SET(pfcc_reg, in, local_port, 1);
 	MLX5_SET(pfcc_reg, in, pptx, tx_pause);
 	MLX5_SET(pfcc_reg, in, pprx, rx_pause);
 
-	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
-				   sizeof(out), MLX5_REG_PFCC, 0, 1);
-	return err;
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				    sizeof(out), MLX5_REG_PFCC, 0, 1);
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 8b494b5..30e2ba3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -345,6 +345,7 @@
 {
 	struct mlx5_qp_table *table = &dev->priv.qp_table;
 
+	memset(table, 0, sizeof(*table));
 	spin_lock_init(&table->lock);
 	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
 	mlx5_qp_debugfs_init(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index c48f504..ffada80 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -531,6 +531,7 @@
 {
 	struct mlx5_srq_table *table = &dev->priv.srq_table;
 
+	memset(table, 0, sizeof(*table));
 	spin_lock_init(&table->lock);
 	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index b4c87c7..d7068f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -177,7 +177,7 @@
 
 void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
 {
-	u32 in[MLX5_ST_SZ_DW(destroy_tir_out)];
+	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
 	u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
 
 	memset(in, 0, sizeof(in));
@@ -206,7 +206,7 @@
 
 void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
 {
-	u32 in[MLX5_ST_SZ_DW(destroy_tis_out)];
+	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
 	u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
 
 	memset(in, 0, sizeof(in));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 3e52ee9..d448431 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -868,7 +868,7 @@
 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
 
 	switch (attr->id) {
-	case SWITCHDEV_ATTR_PORT_PARENT_ID:
+	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
 		attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
 		memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
 		break;
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 66d4ab7..60f43ec 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1601,6 +1601,7 @@
 	{ .compatible = "micrel,ks8851" },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, ks8851_match_table);
 
 static struct spi_driver ks8851_driver = {
 	.driver = {
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 3fd8ca6..36a09d9 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -33,4 +33,13 @@
 	  Enable the verify after the buffer write useful for debugging purpose.
 	  If unsure, say N.
 
+config ENCX24J600
+	tristate "ENCX24J600 support"
+	depends on SPI
+	---help---
+	  Support for the Microchip ENC424J600/624J600 ethernet chip.
+
+	  To compile this driver as a module, choose M here. The module will be
+	  called encx24j600.
+
 endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index 573d429..ff78f62 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_ENC28J60) += enc28j60.o
+obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
new file mode 100644
index 0000000..f3bb905
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -0,0 +1,513 @@
+/**
+ * Register map access API - ENCX24J600 support
+ *
+ * Copyright 2015 Gridpoint
+ *
+ * Author: Jon Ringle <jringle@gridpoint.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "encx24j600_hw.h"
+
+static inline bool is_bits_set(int value, int mask)
+{
+	return (value & mask) == mask;
+}
+
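+/* Issue a bank-select opcode and cache the currently selected SFR bank */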
+static int encx24j600_switch_bank(struct encx24j600_context *ctx,
+					 int bank)
+{
+	int ret = 0;
+
+	int bank_opcode = BANK_SELECT(bank);
+	ret = spi_write(ctx->spi, &bank_opcode, 1);
+	if (ret == 0)
+		ctx->bank = bank;
+
+	return ret;
+}
+
+static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode,
+			    const void *buf, size_t len)
+{
+	struct spi_message m;
+	struct spi_transfer t[2] = { { .tx_buf = &opcode, .len = 1, },
+				     { .tx_buf = buf, .len = len }, };
+	spi_message_init(&m);
+	spi_message_add_tail(&t[0], &m);
+	spi_message_add_tail(&t[1], &m);
+
+	return spi_sync(ctx->spi, &m);
+}
+
+static void regmap_lock_mutex(void *context)
+{
+	struct encx24j600_context *ctx = context;
+	mutex_lock(&ctx->mutex);
+}
+
+static void regmap_unlock_mutex(void *context)
+{
+	struct encx24j600_context *ctx = context;
+	mutex_unlock(&ctx->mutex);
+}
+
+static int regmap_encx24j600_sfr_read(void *context, u8 reg, u8 *val,
+				      size_t len)
+{
+	struct encx24j600_context *ctx = context;
+	u8 banked_reg = reg & ADDR_MASK;
+	u8 bank = ((reg & BANK_MASK) >> BANK_SHIFT);
+	u8 cmd = RCRU;
+	int ret = 0;
+	int i = 0;
+	u8 tx_buf[2];
+
+	if (reg < 0x80) {
+		cmd = RCRCODE | banked_reg;
+		if ((banked_reg < 0x16) && (ctx->bank != bank))
+			ret = encx24j600_switch_bank(ctx, bank);
+		if (unlikely(ret))
+			return ret;
+	} else {
+		/* Translate registers that are more efficiently accessed
+		 * using dedicated 3-byte SPI commands
+		 */
+		switch (reg) {
+		case EGPRDPT:
+			cmd = RGPRDPT; break;
+		case EGPWRPT:
+			cmd = RGPWRPT; break;
+		case ERXRDPT:
+			cmd = RRXRDPT; break;
+		case ERXWRPT:
+			cmd = RRXWRPT; break;
+		case EUDARDPT:
+			cmd = RUDARDPT; break;
+		case EUDAWRPT:
+			cmd = RUDAWRPT; break;
+		case EGPDATA:
+		case ERXDATA:
+		case EUDADATA:
+		default:
+			return -EINVAL;
+		}
+	}
+
+	tx_buf[i++] = cmd;
+	if (cmd == RCRU)
+		tx_buf[i++] = reg;
+
+	ret = spi_write_then_read(ctx->spi, tx_buf, i, val, len);
+
+	return ret;
+}
+
+static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx,
+					u8 reg, u8 *val, size_t len,
+					u8 unbanked_cmd, u8 banked_code)
+{
+	u8 banked_reg = reg & ADDR_MASK;
+	u8 bank = ((reg & BANK_MASK) >> BANK_SHIFT);
+	u8 cmd = unbanked_cmd;
+	struct spi_message m;
+	struct spi_transfer t[3] = { { .tx_buf = &cmd, .len = sizeof(cmd), },
+				     { .tx_buf = &reg, .len = sizeof(reg), },
+				     { .tx_buf = val, .len = len }, };
+
+	if (reg < 0x80) {
+		int ret = 0;
+		cmd = banked_code | banked_reg;
+		if ((banked_reg < 0x16) && (ctx->bank != bank))
+			ret = encx24j600_switch_bank(ctx, bank);
+		if (unlikely(ret))
+			return ret;
+	} else {
+		/* Translate registers that are more efficiently accessed
+		 * using dedicated 3-byte SPI commands
+		 */
+		switch (reg) {
+		case EGPRDPT:
+			cmd = WGPRDPT; break;
+		case EGPWRPT:
+			cmd = WGPWRPT; break;
+		case ERXRDPT:
+			cmd = WRXRDPT; break;
+		case ERXWRPT:
+			cmd = WRXWRPT; break;
+		case EUDARDPT:
+			cmd = WUDARDPT; break;
+		case EUDAWRPT:
+			cmd = WUDAWRPT; break;
+		case EGPDATA:
+		case ERXDATA:
+		case EUDADATA:
+		default:
+			return -EINVAL;
+		}
+	}
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t[0], &m);
+
+	if (cmd == unbanked_cmd) {
+		t[1].tx_buf = &reg;
+		spi_message_add_tail(&t[1], &m);
+	}
+
+	spi_message_add_tail(&t[2], &m);
+	return spi_sync(ctx->spi, &m);
+}
+
+static int regmap_encx24j600_sfr_write(void *context, u8 reg, u8 *val,
+				       size_t len)
+{
+	struct encx24j600_context *ctx = context;
+	return regmap_encx24j600_sfr_update(ctx, reg, val, len, WCRU, WCRCODE);
+}
+
+static int regmap_encx24j600_sfr_set_bits(struct encx24j600_context *ctx,
+					  u8 reg, u8 val)
+{
+	return regmap_encx24j600_sfr_update(ctx, reg, &val, 1, BFSU, BFSCODE);
+}
+
+static int regmap_encx24j600_sfr_clr_bits(struct encx24j600_context *ctx,
+					  u8 reg, u8 val)
+{
+	return regmap_encx24j600_sfr_update(ctx, reg, &val, 1, BFCU, BFCCODE);
+}
+
+static int regmap_encx24j600_reg_update_bits(void *context, unsigned int reg,
+					     unsigned int mask,
+					     unsigned int val)
+{
+	struct encx24j600_context *ctx = context;
+
+	int ret = 0;
+	unsigned int set_mask = mask & val;
+	unsigned int clr_mask = mask & ~val;
+
+	if ((reg >= 0x40 && reg < 0x6c) || reg >= 0x80)
+		return -EINVAL;
+
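+	/* apply the update one byte at a time: bit-set (BFSU/BFSCODE) first,
+	 * then bit-clear (BFCU/BFCCODE), low byte at reg, high byte at reg + 1
+	 */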
+	if (set_mask & 0xff)
+		ret = regmap_encx24j600_sfr_set_bits(ctx, reg, set_mask);
+
+	set_mask = (set_mask & 0xff00) >> 8;
+
+	if ((set_mask & 0xff) && (ret == 0))
+		ret = regmap_encx24j600_sfr_set_bits(ctx, reg + 1, set_mask);
+
+	if ((clr_mask & 0xff) && (ret == 0))
+		ret = regmap_encx24j600_sfr_clr_bits(ctx, reg, clr_mask);
+
+	clr_mask = (clr_mask & 0xff00) >> 8;
+
+	if ((clr_mask & 0xff) && (ret == 0))
+		ret = regmap_encx24j600_sfr_clr_bits(ctx, reg + 1, clr_mask);
+
+	return ret;
+}
+
+int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
+				size_t count)
+{
+	struct encx24j600_context *ctx = context;
+
+	if (reg < 0xc0)
+		return encx24j600_cmdn(ctx, reg, data, count);
+	else
+		/* SPI 1-byte command. Ignore data */
+		return spi_write(ctx->spi, &reg, 1);
+}
+EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_write);
+
+int regmap_encx24j600_spi_read(void *context, u8 reg, u8 *data, size_t count)
+{
+	struct encx24j600_context *ctx = context;
+
+	if (reg == RBSEL && count > 1)
+		count = 1;
+
+	return spi_write_then_read(ctx->spi, &reg, sizeof(reg), data, count);
+}
+EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_read);
+
+static int regmap_encx24j600_write(void *context, const void *data,
+				   size_t len)
+{
+	u8 *dout = (u8 *)data;
+	u8 reg = dout[0];
+	++dout;
+	--len;
+
+	if (reg > 0xa0)
+		return regmap_encx24j600_spi_write(context, reg, dout, len);
+
+	if (len > 2)
+		return -EINVAL;
+
+	return regmap_encx24j600_sfr_write(context, reg, dout, len);
+}
+
+static int regmap_encx24j600_read(void *context,
+				  const void *reg_buf, size_t reg_size,
+				  void *val, size_t val_size)
+{
+	u8 reg = *(const u8 *)reg_buf;
+
+	if (reg_size != 1) {
+		pr_err("%s: reg=%02x reg_size=%zu\n", __func__, reg, reg_size);
+		return -EINVAL;
+	}
+
+	if (reg > 0xa0)
+		return regmap_encx24j600_spi_read(context, reg, val, val_size);
+
+	if (val_size > 2) {
+		pr_err("%s: reg=%02x val_size=%zu\n", __func__, reg, val_size);
+		return -EINVAL;
+	}
+
+	return regmap_encx24j600_sfr_read(context, reg, val, val_size);
+}
+
+static bool encx24j600_regmap_readable(struct device *dev, unsigned int reg)
+{
+	if ((reg < 0x36) ||
+	    ((reg >= 0x40) && (reg < 0x4c)) ||
+	    ((reg >= 0x52) && (reg < 0x56)) ||
+	    ((reg >= 0x60) && (reg < 0x66)) ||
+	    ((reg >= 0x68) && (reg < 0x80)) ||
+	    ((reg >= 0x86) && (reg < 0x92)) ||
+	    (reg == 0xc8))
+		return true;
+	else
+		return false;
+}
+
+static bool encx24j600_regmap_writeable(struct device *dev, unsigned int reg)
+{
+	if ((reg < 0x12) ||
+	    ((reg >= 0x14) && (reg < 0x1a)) ||
+	    ((reg >= 0x1c) && (reg < 0x36)) ||
+	    ((reg >= 0x40) && (reg < 0x4c)) ||
+	    ((reg >= 0x52) && (reg < 0x56)) ||
+	    ((reg >= 0x60) && (reg < 0x68)) ||
+	    ((reg >= 0x6c) && (reg < 0x80)) ||
+	    ((reg >= 0x86) && (reg < 0x92)) ||
+	    ((reg >= 0xc0) && (reg < 0xc8)) ||
+	    ((reg >= 0xca) && (reg < 0xf0)))
+		return true;
+	else
+		return false;
+}
+
+static bool encx24j600_regmap_volatile(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case ERXHEAD:
+	case EDMACS:
+	case ETXSTAT:
+	case ETXWIRE:
+	case ECON1:	/* Can be modified via single byte cmds */
+	case ECON2:	/* Can be modified via single byte cmds */
+	case ESTAT:
+	case EIR:	/* Can be modified via single byte cmds */
+	case MIRD:
+	case MISTAT:
+		return true;
+	default:
+		break;
+	}
+
+	return false;
+}
+
+static bool encx24j600_regmap_precious(struct device *dev, unsigned int reg)
+{
+	/* single byte cmds are precious */
+	if (((reg >= 0xc0) && (reg < 0xc8)) ||
+	    ((reg >= 0xca) && (reg < 0xf0)))
+		return true;
+	else
+		return false;
+}
+
+static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg,
+					  unsigned int *val)
+{
+	struct encx24j600_context *ctx = context;
+	int ret;
+	unsigned int mistat;
+
+	reg = MIREGADR_VAL | (reg & PHREG_MASK);
+	ret = regmap_write(ctx->regmap, MIREGADR, reg);
+	if (unlikely(ret))
+		goto err_out;
+
+	ret = regmap_write(ctx->regmap, MICMD, MIIRD);
+	if (unlikely(ret))
+		goto err_out;
+
+	usleep_range(26, 100);
+	while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0 &&
+	       (mistat & BUSY))
+		cpu_relax();
+
+	if (unlikely(ret))
+		goto err_out;
+
+	ret = regmap_write(ctx->regmap, MICMD, 0);
+	if (unlikely(ret))
+		goto err_out;
+
+	ret = regmap_read(ctx->regmap, MIRD, val);
+
+err_out:
+	if (ret)
+		pr_err("%s: error %d reading reg %02x\n", __func__, ret,
+		       reg & PHREG_MASK);
+
+	return ret;
+}
+
+static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg,
+					   unsigned int val)
+{
+	struct encx24j600_context *ctx = context;
+	int ret;
+	unsigned int mistat;
+
+	reg = MIREGADR_VAL | (reg & PHREG_MASK);
+	ret = regmap_write(ctx->regmap, MIREGADR, reg);
+	if (unlikely(ret))
+		goto err_out;
+
+	ret = regmap_write(ctx->regmap, MIWR, val);
+	if (unlikely(ret))
+		goto err_out;
+
+	usleep_range(26, 100);
+	while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0 &&
+	       (mistat & BUSY))
+		cpu_relax();
+
+err_out:
+	if (ret)
+		pr_err("%s: error %d writing reg %02x=%04x\n", __func__, ret,
+		       reg & PHREG_MASK, val);
+
+	return ret;
+}
+
+static bool encx24j600_phymap_readable(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case PHCON1:
+	case PHSTAT1:
+	case PHANA:
+	case PHANLPA:
+	case PHANE:
+	case PHCON2:
+	case PHSTAT2:
+	case PHSTAT3:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool encx24j600_phymap_writeable(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case PHCON1:
+	case PHCON2:
+	case PHANA:
+		return true;
+	case PHSTAT1:
+	case PHSTAT2:
+	case PHSTAT3:
+	case PHANLPA:
+	case PHANE:
+	default:
+		return false;
+	}
+}
+
+static bool encx24j600_phymap_volatile(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case PHSTAT1:
+	case PHSTAT2:
+	case PHSTAT3:
+	case PHANLPA:
+	case PHANE:
+	case PHCON2:
+		return true;
+	default:
+		return false;
+	}
+}
+
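+/* main SFR register map: 8-bit addresses, 16-bit registers at even addresses */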
+static struct regmap_config regcfg = {
+	.name = "reg",
+	.reg_bits = 8,
+	.val_bits = 16,
+	.max_register = 0xee,
+	.reg_stride = 2,
+	.cache_type = REGCACHE_RBTREE,
+	.val_format_endian = REGMAP_ENDIAN_LITTLE,
+	.readable_reg = encx24j600_regmap_readable,
+	.writeable_reg = encx24j600_regmap_writeable,
+	.volatile_reg = encx24j600_regmap_volatile,
+	.precious_reg = encx24j600_regmap_precious,
+	.lock = regmap_lock_mutex,
+	.unlock = regmap_unlock_mutex,
+};
+
+static struct regmap_bus regmap_encx24j600 = {
+	.write = regmap_encx24j600_write,
+	.read = regmap_encx24j600_read,
+	.reg_update_bits = regmap_encx24j600_reg_update_bits,
+};
+
+static struct regmap_config phycfg = {
+	.name = "phy",
+	.reg_bits = 8,
+	.val_bits = 16,
+	.max_register = 0x1f,
+	.cache_type = REGCACHE_RBTREE,
+	.val_format_endian = REGMAP_ENDIAN_LITTLE,
+	.readable_reg = encx24j600_phymap_readable,
+	.writeable_reg = encx24j600_phymap_writeable,
+	.volatile_reg = encx24j600_phymap_volatile,
+};
+static struct regmap_bus phymap_encx24j600 = {
+	.reg_write = regmap_encx24j600_phy_reg_write,
+	.reg_read = regmap_encx24j600_phy_reg_read,
+};
+
+void devm_regmap_init_encx24j600(struct device *dev,
+				 struct encx24j600_context *ctx)
+{
+	mutex_init(&ctx->mutex);
+	regcfg.lock_arg = ctx;
+	ctx->regmap = devm_regmap_init(dev, &regmap_encx24j600, ctx, &regcfg);
+	ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
new file mode 100644
index 0000000..e1329d9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -0,0 +1,1127 @@
+/**
+ * Microchip ENCX24J600 ethernet driver
+ *
+ * Copyright (C) 2015 Gridpoint
+ * Author: Jon Ringle <jringle@gridpoint.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spi/spi.h>
+
+#include "encx24j600_hw.h"
+
+#define DRV_NAME	"encx24j600"
+#define DRV_VERSION	"1.0"
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/* SRAM memory layout:
+ *
+ * 0x0000-0x05ff TX buffers  1.5KB  (1*1536) reside in the GP area in SRAM
+ * 0x0600-0x5fff RX buffers 22.5KB (15*1536) reside in the RX area in SRAM
+ */
+#define ENC_TX_BUF_START 0x0000U
+#define ENC_RX_BUF_START 0x0600U
+#define ENC_RX_BUF_END   0x5fffU
+#define ENC_SRAM_SIZE    0x6000U
+
+enum {
+	RXFILTER_NORMAL,
+	RXFILTER_MULTI,
+	RXFILTER_PROMISC
+};
+
+struct encx24j600_priv {
+	struct net_device        *ndev;
+	struct mutex              lock; /* device access lock */
+	struct encx24j600_context ctx;
+	struct sk_buff           *tx_skb;
+	struct task_struct       *kworker_task;
+	struct kthread_worker     kworker;
+	struct kthread_work       tx_work;
+	struct kthread_work       setrx_work;
+	u16                       next_packet;
+	bool                      hw_enabled;
+	bool                      full_duplex;
+	bool                      autoneg;
+	u16                       speed;
+	int                       rxfilter;
+	u32                       msg_enable;
+};
+
+static void dump_packet(const char *msg, int len, const char *data)
+{
+	pr_debug(DRV_NAME ": %s - packet len:%d\n", msg, len);
+	print_hex_dump_bytes("pk data: ", DUMP_PREFIX_OFFSET, data, len);
+}
+
+static void encx24j600_dump_rsv(struct encx24j600_priv *priv, const char *msg,
+				struct rsv *rsv)
+{
+	struct net_device *dev = priv->ndev;
+
+	netdev_info(dev, "RX packet Len:%d\n", rsv->len);
+	netdev_dbg(dev, "%s - NextPk: 0x%04x\n", msg,
+		   rsv->next_packet);
+	netdev_dbg(dev, "RxOK: %d, DribbleNibble: %d\n",
+		   RSV_GETBIT(rsv->rxstat, RSV_RXOK),
+		   RSV_GETBIT(rsv->rxstat, RSV_DRIBBLENIBBLE));
+	netdev_dbg(dev, "CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
+		   RSV_GETBIT(rsv->rxstat, RSV_CRCERROR),
+		   RSV_GETBIT(rsv->rxstat, RSV_LENCHECKERR),
+		   RSV_GETBIT(rsv->rxstat, RSV_LENOUTOFRANGE));
+	netdev_dbg(dev, "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
+		   RSV_GETBIT(rsv->rxstat, RSV_RXMULTICAST),
+		   RSV_GETBIT(rsv->rxstat, RSV_RXBROADCAST),
+		   RSV_GETBIT(rsv->rxstat, RSV_RXLONGEVDROPEV),
+		   RSV_GETBIT(rsv->rxstat, RSV_CARRIEREV));
+	netdev_dbg(dev, "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
+		   RSV_GETBIT(rsv->rxstat, RSV_RXCONTROLFRAME),
+		   RSV_GETBIT(rsv->rxstat, RSV_RXPAUSEFRAME),
+		   RSV_GETBIT(rsv->rxstat, RSV_RXUNKNOWNOPCODE),
+		   RSV_GETBIT(rsv->rxstat, RSV_RXTYPEVLAN));
+}
+
+static u16 encx24j600_read_reg(struct encx24j600_priv *priv, u8 reg)
+{
+	struct net_device *dev = priv->ndev;
+	unsigned int val = 0;
+	int ret = regmap_read(priv->ctx.regmap, reg, &val);
+	if (unlikely(ret))
+		netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n",
+			  __func__, ret, reg);
+	return val;
+}
+
+static void encx24j600_write_reg(struct encx24j600_priv *priv, u8 reg, u16 val)
+{
+	struct net_device *dev = priv->ndev;
+	int ret = regmap_write(priv->ctx.regmap, reg, val);
+	if (unlikely(ret))
+		netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
+			  __func__, ret, reg, val);
+}
+
+static void encx24j600_update_reg(struct encx24j600_priv *priv, u8 reg,
+				  u16 mask, u16 val)
+{
+	struct net_device *dev = priv->ndev;
+	int ret = regmap_update_bits(priv->ctx.regmap, reg, mask, val);
+	if (unlikely(ret))
+		netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n",
+			  __func__, ret, reg, val, mask);
+}
+
+static u16 encx24j600_read_phy(struct encx24j600_priv *priv, u8 reg)
+{
+	struct net_device *dev = priv->ndev;
+	unsigned int val = 0;
+	int ret = regmap_read(priv->ctx.phymap, reg, &val);
+	if (unlikely(ret))
+		netif_err(priv, drv, dev, "%s: error %d reading %02x\n",
+			  __func__, ret, reg);
+	return val;
+}
+
+static void encx24j600_write_phy(struct encx24j600_priv *priv, u8 reg, u16 val)
+{
+	struct net_device *dev = priv->ndev;
+	int ret = regmap_write(priv->ctx.phymap, reg, val);
+	if (unlikely(ret))
+		netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
+			  __func__, ret, reg, val);
+}
+
+static void encx24j600_clr_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
+{
+	encx24j600_update_reg(priv, reg, mask, 0);
+}
+
+static void encx24j600_set_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
+{
+	encx24j600_update_reg(priv, reg, mask, mask);
+}
+
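+/* Issue a single-byte SPI command; the dummy value written via regmap is ignored */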
+static void encx24j600_cmd(struct encx24j600_priv *priv, u8 cmd)
+{
+	struct net_device *dev = priv->ndev;
+	int ret = regmap_write(priv->ctx.regmap, cmd, 0);
+	if (unlikely(ret))
+		netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n",
+			  __func__, ret, cmd);
+}
+
+static int encx24j600_raw_read(struct encx24j600_priv *priv, u8 reg, u8 *data,
+			       size_t count)
+{
+	int ret;
+	mutex_lock(&priv->ctx.mutex);
+	ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count);
+	mutex_unlock(&priv->ctx.mutex);
+
+	return ret;
+}
+
+static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
+				const u8 *data, size_t count)
+{
+	int ret;
+	mutex_lock(&priv->ctx.mutex);
+	ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count);
+	mutex_unlock(&priv->ctx.mutex);
+
+	return ret;
+}
+
+static void encx24j600_update_phcon1(struct encx24j600_priv *priv)
+{
+	u16 phcon1 = encx24j600_read_phy(priv, PHCON1);
+	if (priv->autoneg == AUTONEG_ENABLE) {
+		phcon1 |= ANEN | RENEG;
+	} else {
+		phcon1 &= ~ANEN;
+		if (priv->speed == SPEED_100)
+			phcon1 |= SPD100;
+		else
+			phcon1 &= ~SPD100;
+
+		if (priv->full_duplex)
+			phcon1 |= PFULDPX;
+		else
+			phcon1 &= ~PFULDPX;
+	}
+	encx24j600_write_phy(priv, PHCON1, phcon1);
+}
+
+/* Waits for autonegotiation to complete. */
+static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv)
+{
+	struct net_device *dev = priv->ndev;
+	unsigned long timeout = jiffies + msecs_to_jiffies(2000);
+	u16 phstat1;
+	u16 estat;
+	int ret = 0;
+
+	phstat1 = encx24j600_read_phy(priv, PHSTAT1);
+	while ((phstat1 & ANDONE) == 0) {
+		if (time_after(jiffies, timeout)) {
+			u16 phstat3;
+
+			netif_notice(priv, drv, dev, "timeout waiting for autoneg done\n");
+
+			priv->autoneg = AUTONEG_DISABLE;
+			phstat3 = encx24j600_read_phy(priv, PHSTAT3);
+			priv->speed = (phstat3 & PHY3SPD100)
+				      ? SPEED_100 : SPEED_10;
+			priv->full_duplex = (phstat3 & PHY3DPX) ? 1 : 0;
+			encx24j600_update_phcon1(priv);
+			netif_notice(priv, drv, dev, "Using parallel detection: %s/%s",
+				     priv->speed == SPEED_100 ? "100" : "10",
+				     priv->full_duplex ? "Full" : "Half");
+
+			return -ETIMEDOUT;
+		}
+		cpu_relax();
+		phstat1 = encx24j600_read_phy(priv, PHSTAT1);
+	}
+
+	estat = encx24j600_read_reg(priv, ESTAT);
+	if (estat & PHYDPX) {
+		encx24j600_set_bits(priv, MACON2, FULDPX);
+		encx24j600_write_reg(priv, MABBIPG, 0x15);
+	} else {
+		encx24j600_clr_bits(priv, MACON2, FULDPX);
+		encx24j600_write_reg(priv, MABBIPG, 0x12);
+		/* Max retransmission attempts */
+		encx24j600_write_reg(priv, MACLCON, 0x370f);
+	}
+
+	return ret;
+}
+
+/* Access the PHY to determine link status */
+static void encx24j600_check_link_status(struct encx24j600_priv *priv)
+{
+	struct net_device *dev = priv->ndev;
+	u16 estat;
+
+	estat = encx24j600_read_reg(priv, ESTAT);
+
+	if (estat & PHYLNK) {
+		if (priv->autoneg == AUTONEG_ENABLE)
+			encx24j600_wait_for_autoneg(priv);
+
+		netif_carrier_on(dev);
+		netif_info(priv, ifup, dev, "link up\n");
+	} else {
+		netif_info(priv, ifdown, dev, "link down\n");
+
+		/* Re-enable autoneg since we won't know what we might be
+		 * connected to when the link is brought back up again.
+		 */
+		priv->autoneg  = AUTONEG_ENABLE;
+		priv->full_duplex = true;
+		priv->speed = SPEED_100;
+		netif_carrier_off(dev);
+	}
+}
+
+static void encx24j600_int_link_handler(struct encx24j600_priv *priv)
+{
+	struct net_device *dev = priv->ndev;
+
+	netif_dbg(priv, intr, dev, "%s\n", __func__);
+	encx24j600_check_link_status(priv);
+	encx24j600_clr_bits(priv, EIR, LINKIF);
+}
+
+static void encx24j600_tx_complete(struct encx24j600_priv *priv, bool err)
+{
+	struct net_device *dev = priv->ndev;
+
+	if (!priv->tx_skb) {
+		BUG();
+		return;
+	}
+
+	mutex_lock(&priv->lock);
+
+	if (err)
+		dev->stats.tx_errors++;
+	else
+		dev->stats.tx_packets++;
+
+	dev->stats.tx_bytes += priv->tx_skb->len;
+
+	encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);
+
+	netif_dbg(priv, tx_done, dev, "TX Done%s\n", err ? ": Err" : "");
+
+	dev_kfree_skb(priv->tx_skb);
+	priv->tx_skb = NULL;
+
+	netif_wake_queue(dev);
+
+	mutex_unlock(&priv->lock);
+}
+
+static int encx24j600_receive_packet(struct encx24j600_priv *priv,
+				     struct rsv *rsv)
+{
+	struct net_device *dev = priv->ndev;
+	struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN);
+	if (!skb) {
+		pr_err_ratelimited("RX: OOM: packet dropped\n");
+		dev->stats.rx_dropped++;
+		return -ENOMEM;
+	}
+	skb_reserve(skb, NET_IP_ALIGN);
+	encx24j600_raw_read(priv, RRXDATA, skb_put(skb, rsv->len), rsv->len);
+
+	if (netif_msg_pktdata(priv))
+		dump_packet("RX", skb->len, skb->data);
+
+	skb->dev = dev;
+	skb->protocol = eth_type_trans(skb, dev);
+	/* No hardware checksum value is provided, so let the stack verify it */
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* Maintain stats */
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += rsv->len;
+	priv->next_packet = rsv->next_packet;
+
+	netif_rx(skb);
+
+	return 0;
+}
+
+static void encx24j600_rx_packets(struct encx24j600_priv *priv, u8 packet_count)
+{
+	struct net_device *dev = priv->ndev;
+
+	while (packet_count--) {
+		struct rsv rsv;
+		u16 newrxtail;
+
+		encx24j600_write_reg(priv, ERXRDPT, priv->next_packet);
+		encx24j600_raw_read(priv, RRXDATA, (u8 *)&rsv, sizeof(rsv));
+
+		if (netif_msg_rx_status(priv))
+			encx24j600_dump_rsv(priv, __func__, &rsv);
+
+		if (!RSV_GETBIT(rsv.rxstat, RSV_RXOK) ||
+		    (rsv.len > MAX_FRAMELEN)) {
+			netif_err(priv, rx_err, dev, "RX Error %04x\n",
+				  rsv.rxstat);
+			dev->stats.rx_errors++;
+
+			if (RSV_GETBIT(rsv.rxstat, RSV_CRCERROR))
+				dev->stats.rx_crc_errors++;
+			if (RSV_GETBIT(rsv.rxstat, RSV_LENCHECKERR))
+				dev->stats.rx_frame_errors++;
+			if (rsv.len > MAX_FRAMELEN)
+				dev->stats.rx_over_errors++;
+		} else {
+			encx24j600_receive_packet(priv, &rsv);
+		}
+
+		newrxtail = priv->next_packet - 2;
+		if (newrxtail == ENC_RX_BUF_START)
+			newrxtail = SRAM_SIZE - 2;
+
+		encx24j600_cmd(priv, SETPKTDEC);
+		encx24j600_write_reg(priv, ERXTAIL, newrxtail);
+	}
+}
+
+static irqreturn_t encx24j600_isr(int irq, void *dev_id)
+{
+	struct encx24j600_priv *priv = dev_id;
+	struct net_device *dev = priv->ndev;
+	int eir;
+
+	/* Disable interrupts while they are being serviced */
+	encx24j600_cmd(priv, CLREIE);
+
+	eir = encx24j600_read_reg(priv, EIR);
+
+	if (eir & LINKIF)
+		encx24j600_int_link_handler(priv);
+
+	if (eir & TXIF)
+		encx24j600_tx_complete(priv, false);
+
+	if (eir & TXABTIF)
+		encx24j600_tx_complete(priv, true);
+
+	if (eir & RXABTIF) {
+		if (eir & PCFULIF) {
+			/* Packet counter is full */
+			netif_err(priv, rx_err, dev, "Packet counter full\n");
+		}
+		dev->stats.rx_dropped++;
+		encx24j600_clr_bits(priv, EIR, RXABTIF);
+	}
+
+	if (eir & PKTIF) {
+		u8 packet_count;
+
+		mutex_lock(&priv->lock);
+
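+		/* The low byte of ESTAT holds the number of pending RX
+		 * packets; keep draining until it reads zero.
+		 */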
+		packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
+		while (packet_count) {
+			encx24j600_rx_packets(priv, packet_count);
+			packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
+		}
+
+		mutex_unlock(&priv->lock);
+	}
+
+	/* Enable interrupts */
+	encx24j600_cmd(priv, SETEIE);
+
+	return IRQ_HANDLED;
+}
+
+static int encx24j600_soft_reset(struct encx24j600_priv *priv)
+{
+	int ret = 0;
+	int timeout;
+	u16 eudast;
+
+	/* Write and verify a test value to EUDAST */
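+	/* Bypass the regmap cache so the test value really reaches the chip */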
+	regcache_cache_bypass(priv->ctx.regmap, true);
+	timeout = 10;
+	do {
+		encx24j600_write_reg(priv, EUDAST, EUDAST_TEST_VAL);
+		eudast = encx24j600_read_reg(priv, EUDAST);
+		usleep_range(25, 100);
+	} while ((eudast != EUDAST_TEST_VAL) && --timeout);
+	regcache_cache_bypass(priv->ctx.regmap, false);
+
+	if (timeout == 0) {
+		ret = -ETIMEDOUT;
+		goto err_out;
+	}
+
+	/* Wait for CLKRDY to become set */
+	timeout = 10;
+	while (!(encx24j600_read_reg(priv, ESTAT) & CLKRDY) && --timeout)
+		usleep_range(25, 100);
+
+	if (timeout == 0) {
+		ret = -ETIMEDOUT;
+		goto err_out;
+	}
+
+	/* Issue a System Reset command */
+	encx24j600_cmd(priv, SETETHRST);
+	usleep_range(25, 100);
+
+	/* Confirm that EUDAST has 0000h after system reset */
+	if (encx24j600_read_reg(priv, EUDAST) != 0) {
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	/* Wait for PHY register and status bits to become available */
+	usleep_range(256, 1000);
+
+err_out:
+	return ret;
+}
+
+static int encx24j600_hw_reset(struct encx24j600_priv *priv)
+{
+	int ret;
+
+	mutex_lock(&priv->lock);
+	ret = encx24j600_soft_reset(priv);
+	mutex_unlock(&priv->lock);
+
+	return ret;
+}
+
+static void encx24j600_reset_hw_tx(struct encx24j600_priv *priv)
+{
+	encx24j600_set_bits(priv, ECON2, TXRST);
+	encx24j600_clr_bits(priv, ECON2, TXRST);
+}
+
+static void encx24j600_hw_init_tx(struct encx24j600_priv *priv)
+{
+	/* Reset TX */
+	encx24j600_reset_hw_tx(priv);
+
+	/* Clear the TX interrupt flags if they were previously set */
+	encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);
+
+	/* Write the Tx Buffer pointer */
+	encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
+}
+
+static void encx24j600_hw_init_rx(struct encx24j600_priv *priv)
+{
+	encx24j600_cmd(priv, DISABLERX);
+
+	/* Set up RX packet start address in the SRAM */
+	encx24j600_write_reg(priv, ERXST, ENC_RX_BUF_START);
+
+	/* Preload the RX Data pointer to the beginning of the RX area */
+	encx24j600_write_reg(priv, ERXRDPT, ENC_RX_BUF_START);
+
+	priv->next_packet = ENC_RX_BUF_START;
+
+	/* Set up RX end address in the SRAM */
+	encx24j600_write_reg(priv, ERXTAIL, ENC_SRAM_SIZE - 2);
+
+	/* Reset the user data pointers */
+	encx24j600_write_reg(priv, EUDAST, ENC_SRAM_SIZE);
+	encx24j600_write_reg(priv, EUDAND, ENC_SRAM_SIZE + 1);
+
+	/* Set Max Frame length */
+	encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);
+}
+
+static void encx24j600_dump_config(struct encx24j600_priv *priv,
+				   const char *msg)
+{
+	pr_info(DRV_NAME ": %s\n", msg);
+
+	/* CHIP configuration */
+	pr_info(DRV_NAME " ECON1:   %04X\n", encx24j600_read_reg(priv, ECON1));
+	pr_info(DRV_NAME " ECON2:   %04X\n", encx24j600_read_reg(priv, ECON2));
+	pr_info(DRV_NAME " ERXFCON: %04X\n", encx24j600_read_reg(priv,
+								 ERXFCON));
+	pr_info(DRV_NAME " ESTAT:   %04X\n", encx24j600_read_reg(priv, ESTAT));
+	pr_info(DRV_NAME " EIR:     %04X\n", encx24j600_read_reg(priv, EIR));
+	pr_info(DRV_NAME " EIDLED:  %04X\n", encx24j600_read_reg(priv, EIDLED));
+
+	/* MAC layer configuration */
+	pr_info(DRV_NAME " MACON1:  %04X\n", encx24j600_read_reg(priv, MACON1));
+	pr_info(DRV_NAME " MACON2:  %04X\n", encx24j600_read_reg(priv, MACON2));
+	pr_info(DRV_NAME " MAIPG:   %04X\n", encx24j600_read_reg(priv, MAIPG));
+	pr_info(DRV_NAME " MACLCON: %04X\n", encx24j600_read_reg(priv,
+								 MACLCON));
+	pr_info(DRV_NAME " MABBIPG: %04X\n", encx24j600_read_reg(priv,
+								 MABBIPG));
+
+	/* PHY configuration */
+	pr_info(DRV_NAME " PHCON1:  %04X\n", encx24j600_read_phy(priv, PHCON1));
+	pr_info(DRV_NAME " PHCON2:  %04X\n", encx24j600_read_phy(priv, PHCON2));
+	pr_info(DRV_NAME " PHANA:   %04X\n", encx24j600_read_phy(priv, PHANA));
+	pr_info(DRV_NAME " PHANLPA: %04X\n", encx24j600_read_phy(priv,
+								 PHANLPA));
+	pr_info(DRV_NAME " PHANE:   %04X\n", encx24j600_read_phy(priv, PHANE));
+	pr_info(DRV_NAME " PHSTAT1: %04X\n", encx24j600_read_phy(priv,
+								 PHSTAT1));
+	pr_info(DRV_NAME " PHSTAT2: %04X\n", encx24j600_read_phy(priv,
+								 PHSTAT2));
+	pr_info(DRV_NAME " PHSTAT3: %04X\n", encx24j600_read_phy(priv,
+								 PHSTAT3));
+}
+
+static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv)
+{
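+	/* Program MACON1.PASSALL and the ERXFCON packet filters for the
+	 * requested receive filter mode.
+	 */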
+	switch (priv->rxfilter) {
+	case RXFILTER_PROMISC:
+		encx24j600_set_bits(priv, MACON1, PASSALL);
+		encx24j600_write_reg(priv, ERXFCON, UCEN | MCEN | NOTMEEN);
+		break;
+	case RXFILTER_MULTI:
+		encx24j600_clr_bits(priv, MACON1, PASSALL);
+		encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN | MCEN);
+		break;
+	case RXFILTER_NORMAL:
+	default:
+		encx24j600_clr_bits(priv, MACON1, PASSALL);
+		encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN);
+		break;
+	}
+}
+
+static int encx24j600_hw_init(struct encx24j600_priv *priv)
+{
+	struct net_device *dev = priv->ndev;
+	int ret = 0;
+	u16 eidled;
+	u16 macon2;
+
+	priv->hw_enabled = false;
+
+	eidled = encx24j600_read_reg(priv, EIDLED);
+	if (((eidled & DEVID_MASK) >> DEVID_SHIFT) != ENCX24J600_DEV_ID) {
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	netif_info(priv, drv, dev, "Silicon rev ID: 0x%02x\n",
+		   (eidled & REVID_MASK) >> REVID_SHIFT);
+
+	/* PHY LEDs:
+	 * LEDA: link + transmit/receive events
+	 * LEDB: link state + collision events
+	 */
+	encx24j600_update_reg(priv, EIDLED, 0xbc00, 0xbc00);
+
+	/* Loopback disabled */
+	encx24j600_write_reg(priv, MACON1, 0x9);
+
+	/* interpacket gap value */
+	encx24j600_write_reg(priv, MAIPG, 0x0c12);
+
+	/* Write the auto negotiation pattern */
+	encx24j600_write_phy(priv, PHANA, PHANA_DEFAULT);
+
+	encx24j600_update_phcon1(priv);
+	encx24j600_check_link_status(priv);
+
+	macon2 = MACON2_RSV1 | TXCRCEN | PADCFG0 | PADCFG2 | MACON2_DEFER;
+	if ((priv->autoneg == AUTONEG_DISABLE) && priv->full_duplex)
+		macon2 |= FULDPX;
+
+	encx24j600_set_bits(priv, MACON2, macon2);
+
+	priv->rxfilter = RXFILTER_NORMAL;
+	encx24j600_set_rxfilter_mode(priv);
+
+	/* Program the Maximum frame length */
+	encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);
+
+	/* Init Tx pointers */
+	encx24j600_hw_init_tx(priv);
+
+	/* Init Rx pointers */
+	encx24j600_hw_init_rx(priv);
+
+	if (netif_msg_hw(priv))
+		encx24j600_dump_config(priv, "Hw is initialized");
+
+err_out:
+	return ret;
+}
+
+static void encx24j600_hw_enable(struct encx24j600_priv *priv)
+{
+	/* Clear the interrupt flags in case any were set */
+	encx24j600_clr_bits(priv, EIR, (PCFULIF | RXABTIF | TXABTIF | TXIF |
+					PKTIF | LINKIF));
+
+	/* Enable the interrupts */
+	encx24j600_write_reg(priv, EIE, (PCFULIE | RXABTIE | TXABTIE | TXIE |
+					 PKTIE | LINKIE | INTIE));
+
+	/* Enable RX */
+	encx24j600_cmd(priv, ENABLERX);
+
+	priv->hw_enabled = true;
+}
+
+static void encx24j600_hw_disable(struct encx24j600_priv *priv)
+{
+	/* Disable all interrupts */
+	encx24j600_write_reg(priv, EIE, 0);
+
+	/* Disable RX */
+	encx24j600_cmd(priv, DISABLERX);
+
+	priv->hw_enabled = false;
+}
+
+static int encx24j600_setlink(struct net_device *dev, u8 autoneg, u16 speed,
+			      u8 duplex)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+	int ret = 0;
+
+	if (!priv->hw_enabled) {
+		/* link is in low power mode now; duplex setting
+		 * will take effect on next encx24j600_hw_init()
+		 */
+		if (speed == SPEED_10 || speed == SPEED_100) {
+			priv->autoneg = (autoneg == AUTONEG_ENABLE);
+			priv->full_duplex = (duplex == DUPLEX_FULL);
+			priv->speed = speed;
+		} else {
+			netif_warn(priv, link, dev, "unsupported link speed setting\n");
+			/* speeds other than SPEED_10 and SPEED_100
+			 * are not supported by the chip
+			 */
+			ret = -EOPNOTSUPP;
+		}
+	} else {
+		netif_warn(priv, link, dev, "Warning: hw must be disabled to set link mode\n");
+		ret = -EBUSY;
+	}
+	return ret;
+}
+
+static void encx24j600_hw_get_macaddr(struct encx24j600_priv *priv,
+				      unsigned char *ethaddr)
+{
+	unsigned short val;
+
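+	/* The MAC address is stored two bytes per register across
+	 * MAADR1..MAADR3, low byte first.
+	 */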
+	val = encx24j600_read_reg(priv, MAADR1);
+
+	ethaddr[0] = val & 0x00ff;
+	ethaddr[1] = (val & 0xff00) >> 8;
+
+	val = encx24j600_read_reg(priv, MAADR2);
+
+	ethaddr[2] = val & 0x00ffU;
+	ethaddr[3] = (val & 0xff00U) >> 8;
+
+	val = encx24j600_read_reg(priv, MAADR3);
+
+	ethaddr[4] = val & 0x00ffU;
+	ethaddr[5] = (val & 0xff00U) >> 8;
+}
+
+/* Program the hardware MAC address from dev->dev_addr.*/
+static int encx24j600_set_hw_macaddr(struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+
+	if (priv->hw_enabled) {
+		netif_info(priv, drv, dev, "Hardware must be disabled to set MAC address\n");
+		return -EBUSY;
+	}
+
+	mutex_lock(&priv->lock);
+
+	netif_info(priv, drv, dev, "%s: Setting MAC address to %pM\n",
+		   dev->name, dev->dev_addr);
+
+	encx24j600_write_reg(priv, MAADR3, (dev->dev_addr[4] |
+			     dev->dev_addr[5] << 8));
+	encx24j600_write_reg(priv, MAADR2, (dev->dev_addr[2] |
+			     dev->dev_addr[3] << 8));
+	encx24j600_write_reg(priv, MAADR1, (dev->dev_addr[0] |
+			     dev->dev_addr[1] << 8));
+
+	mutex_unlock(&priv->lock);
+
+	return 0;
+}
+
+/* Store the new hardware address in dev->dev_addr, and update the MAC.*/
+static int encx24j600_set_mac_address(struct net_device *dev, void *addr)
+{
+	struct sockaddr *address = addr;
+
+	if (netif_running(dev))
+		return -EBUSY;
+	if (!is_valid_ether_addr(address->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+	return encx24j600_set_hw_macaddr(dev);
+}
+
+static int encx24j600_open(struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+
+	int ret = request_threaded_irq(priv->ctx.spi->irq, NULL, encx24j600_isr,
+				       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+				       DRV_NAME, priv);
+	if (unlikely(ret < 0)) {
+		netdev_err(dev, "request irq %d failed (ret = %d)\n",
+			   priv->ctx.spi->irq, ret);
+		return ret;
+	}
+
+	encx24j600_hw_disable(priv);
+	encx24j600_hw_init(priv);
+	encx24j600_hw_enable(priv);
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+static int encx24j600_stop(struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	free_irq(priv->ctx.spi->irq, priv);
+	return 0;
+}
+
+static void encx24j600_setrx_proc(struct kthread_work *ws)
+{
+	struct encx24j600_priv *priv =
+			container_of(ws, struct encx24j600_priv, setrx_work);
+
+	mutex_lock(&priv->lock);
+	encx24j600_set_rxfilter_mode(priv);
+	mutex_unlock(&priv->lock);
+}
+
+static void encx24j600_set_multicast_list(struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+	int oldfilter = priv->rxfilter;
+
+	if (dev->flags & IFF_PROMISC) {
+		netif_dbg(priv, link, dev, "promiscuous mode\n");
+		priv->rxfilter = RXFILTER_PROMISC;
+	} else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
+		netif_dbg(priv, link, dev, "%smulticast mode\n",
+			  (dev->flags & IFF_ALLMULTI) ? "all-" : "");
+		priv->rxfilter = RXFILTER_MULTI;
+	} else {
+		netif_dbg(priv, link, dev, "normal mode\n");
+		priv->rxfilter = RXFILTER_NORMAL;
+	}
+
+	if (oldfilter != priv->rxfilter)
+		queue_kthread_work(&priv->kworker, &priv->setrx_work);
+}
+
+static void encx24j600_hw_tx(struct encx24j600_priv *priv)
+{
+	struct net_device *dev = priv->ndev;
+	netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n",
+		   priv->tx_skb->len);
+
+	if (netif_msg_pktdata(priv))
+		dump_packet("TX", priv->tx_skb->len, priv->tx_skb->data);
+
+	if (encx24j600_read_reg(priv, EIR) & TXABTIF)
+		/* Last transmission aborted due to an error; reset the TX interface */
+		encx24j600_reset_hw_tx(priv);
+
+	/* Clear the TXIF flag if it was previously set */
+	encx24j600_clr_bits(priv, EIR, TXIF);
+
+	/* Set the data pointer to the TX buffer address in the SRAM */
+	encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
+
+	/* Copy the packet into the SRAM */
+	encx24j600_raw_write(priv, WGPDATA, (u8 *)priv->tx_skb->data,
+			     priv->tx_skb->len);
+
+	/* Program the Tx buffer start pointer */
+	encx24j600_write_reg(priv, ETXST, ENC_TX_BUF_START);
+
+	/* Program the packet length */
+	encx24j600_write_reg(priv, ETXLEN, priv->tx_skb->len);
+
+	/* Start the transmission */
+	encx24j600_cmd(priv, SETTXRTS);
+}
+
+static void encx24j600_tx_proc(struct kthread_work *ws)
+{
+	struct encx24j600_priv *priv =
+			container_of(ws, struct encx24j600_priv, tx_work);
+
+	mutex_lock(&priv->lock);
+	encx24j600_hw_tx(priv);
+	mutex_unlock(&priv->lock);
+}
+
+static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+
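+	/* Stop the queue until this frame has been handed to the hardware;
+	 * it is woken again in encx24j600_tx_complete().
+	 */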
+	netif_stop_queue(dev);
+
+	/* save the timestamp */
+	dev->trans_start = jiffies;
+
+	/* Remember the skb for deferred processing */
+	priv->tx_skb = skb;
+
+	queue_kthread_work(&priv->kworker, &priv->tx_work);
+
+	return NETDEV_TX_OK;
+}
+
+/* Deal with a transmit timeout */
+static void encx24j600_tx_timeout(struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+
+	netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n",
+		  jiffies, jiffies - dev->trans_start);
+
+	dev->stats.tx_errors++;
+	netif_wake_queue(dev);
+	return;
+}
+
+static int encx24j600_get_regs_len(struct net_device *dev)
+{
+	return SFR_REG_COUNT;
+}
+
+static void encx24j600_get_regs(struct net_device *dev,
+				struct ethtool_regs *regs, void *p)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+	u16 *buff = p;
+	u8 reg;
+
+	regs->version = 1;
+	mutex_lock(&priv->lock);
+	for (reg = 0; reg < SFR_REG_COUNT; reg += 2) {
+		unsigned int val = 0;
+		/* ignore errors for unreadable registers */
+		regmap_read(priv->ctx.regmap, reg, &val);
+		buff[reg] = val & 0xffff;
+	}
+	mutex_unlock(&priv->lock);
+}
+
+static void encx24j600_get_drvinfo(struct net_device *dev,
+				   struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, dev_name(dev->dev.parent),
+		sizeof(info->bus_info));
+}
+
+static int encx24j600_get_settings(struct net_device *dev,
+				   struct ethtool_cmd *cmd)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+
+	cmd->transceiver = XCVR_INTERNAL;
+	cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+			 SUPPORTED_Autoneg | SUPPORTED_TP;
+
+	ethtool_cmd_speed_set(cmd, priv->speed);
+	cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+	cmd->port = PORT_TP;
+	cmd->autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+	return 0;
+}
+
+static int encx24j600_set_settings(struct net_device *dev,
+				   struct ethtool_cmd *cmd)
+{
+	return encx24j600_setlink(dev, cmd->autoneg,
+				  ethtool_cmd_speed(cmd), cmd->duplex);
+}
+
+static u32 encx24j600_get_msglevel(struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+	return priv->msg_enable;
+}
+
+static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+	priv->msg_enable = val;
+}
+
+static const struct ethtool_ops encx24j600_ethtool_ops = {
+	.get_settings = encx24j600_get_settings,
+	.set_settings = encx24j600_set_settings,
+	.get_drvinfo = encx24j600_get_drvinfo,
+	.get_msglevel = encx24j600_get_msglevel,
+	.set_msglevel = encx24j600_set_msglevel,
+	.get_regs_len = encx24j600_get_regs_len,
+	.get_regs = encx24j600_get_regs,
+};
+
+static const struct net_device_ops encx24j600_netdev_ops = {
+	.ndo_open = encx24j600_open,
+	.ndo_stop = encx24j600_stop,
+	.ndo_start_xmit = encx24j600_tx,
+	.ndo_set_rx_mode = encx24j600_set_multicast_list,
+	.ndo_set_mac_address = encx24j600_set_mac_address,
+	.ndo_tx_timeout = encx24j600_tx_timeout,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+static int encx24j600_spi_probe(struct spi_device *spi)
+{
+	int ret;
+
+	struct net_device *ndev;
+	struct encx24j600_priv *priv;
+
+	ndev = alloc_etherdev(sizeof(struct encx24j600_priv));
+
+	if (!ndev) {
+		ret = -ENOMEM;
+		goto error_out;
+	}
+
+	priv = netdev_priv(ndev);
+	spi_set_drvdata(spi, priv);
+	dev_set_drvdata(&spi->dev, priv);
+	SET_NETDEV_DEV(ndev, &spi->dev);
+
+	priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+	priv->ndev = ndev;
+
+	/* Default PHY configuration */
+	priv->full_duplex = true;
+	priv->autoneg = AUTONEG_ENABLE;
+	priv->speed = SPEED_100;
+
+	priv->ctx.spi = spi;
+	devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
+	ndev->irq = spi->irq;
+	ndev->netdev_ops = &encx24j600_netdev_ops;
+
+	mutex_init(&priv->lock);
+
+	/* Reset device and check if it is connected */
+	if (encx24j600_hw_reset(priv)) {
+		netif_err(priv, probe, ndev,
+			  DRV_NAME ": Chip is not detected\n");
+		ret = -EIO;
+		goto out_free;
+	}
+
+	/* Initialize the device HW to the consistent state */
+	if (encx24j600_hw_init(priv)) {
+		netif_err(priv, probe, ndev,
+			  DRV_NAME ": HW initialization error\n");
+		ret = -EIO;
+		goto out_free;
+	}
+
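+	/* SPI register access may sleep, so TX and RX-filter updates are
+	 * deferred to a dedicated kthread worker.
+	 */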
+	init_kthread_worker(&priv->kworker);
+	init_kthread_work(&priv->tx_work, encx24j600_tx_proc);
+	init_kthread_work(&priv->setrx_work, encx24j600_setrx_proc);
+
+	priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
+					 "encx24j600");
+
+	if (IS_ERR(priv->kworker_task)) {
+		ret = PTR_ERR(priv->kworker_task);
+		goto out_free;
+	}
+
+	/* Get the MAC address from the chip */
+	encx24j600_hw_get_macaddr(priv, ndev->dev_addr);
+
+	ndev->ethtool_ops = &encx24j600_ethtool_ops;
+
+	ret = register_netdev(ndev);
+	if (unlikely(ret)) {
+		netif_err(priv, probe, ndev, "Error %d initializing encx24j600 card\n",
+			  ret);
+		goto out_free;
+	}
+
+	netif_info(priv, drv, priv->ndev, "MAC address %pM\n", ndev->dev_addr);
+
+	return ret;
+
+out_free:
+	free_netdev(ndev);
+
+error_out:
+	return ret;
+}
+
+static int encx24j600_spi_remove(struct spi_device *spi)
+{
+	struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
+
+	unregister_netdev(priv->ndev);
+
+	free_netdev(priv->ndev);
+
+	return 0;
+}
+
+static const struct spi_device_id encx24j600_spi_id_table[] = {
+	{ .name = "encx24j600" },
+	{ /* sentinel */ }
+};
+
+static struct spi_driver encx24j600_spi_net_driver = {
+	.driver = {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+		.bus	= &spi_bus_type,
+	},
+	.probe		= encx24j600_spi_probe,
+	.remove		= encx24j600_spi_remove,
+	.id_table	= encx24j600_spi_id_table,
+};
+
+static int __init encx24j600_init(void)
+{
+	return spi_register_driver(&encx24j600_spi_net_driver);
+}
+module_init(encx24j600_init);
+
+static void encx24j600_exit(void)
+{
+	spi_unregister_driver(&encx24j600_spi_net_driver);
+}
+module_exit(encx24j600_exit);
+
+MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
+MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
new file mode 100644
index 0000000..4be73d5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -0,0 +1,437 @@
+/*
+ * encx24j600_hw.h: Register definitions
+ */
+
+#ifndef _ENCX24J600_HW_H
+#define _ENCX24J600_HW_H
+
+struct encx24j600_context {
+	struct spi_device *spi;
+	struct regmap *regmap;
+	struct regmap *phymap;
+	struct mutex mutex; /* mutex to protect access to regmap */
+	int bank;
+};
+
+void devm_regmap_init_encx24j600(struct device *dev,
+				 struct encx24j600_context *ctx);
+
+/* Single-byte instructions */
+#define BANK_SELECT(bank) (0xC0 | ((bank & (BANK_MASK >> BANK_SHIFT)) << 1))
+#define B0SEL 0xC0		/* Bank 0 Select */
+#define B1SEL 0xC2		/* Bank 1 Select */
+#define B2SEL 0xC4		/* Bank 2 Select */
+#define B3SEL 0xC6		/* Bank 3 Select */
+#define SETETHRST 0xCA		/* System Reset */
+#define FCDISABLE 0xE0		/* Flow Control Disable */
+#define FCSINGLE 0xE2		/* Flow Control Single */
+#define FCMULTIPLE 0xE4		/* Flow Control Multiple */
+#define FCCLEAR 0xE6		/* Flow Control Clear */
+#define SETPKTDEC 0xCC		/* Decrement Packet Counter */
+#define DMASTOP 0xD2		/* DMA Stop */
+#define DMACKSUM 0xD8		/* DMA Start Checksum */
+#define DMACKSUMS 0xDA		/* DMA Start Checksum with Seed */
+#define DMACOPY 0xDC		/* DMA Start Copy */
+#define DMACOPYS 0xDE		/* DMA Start Copy and Checksum with Seed */
+#define SETTXRTS 0xD4		/* Request Packet Transmission */
+#define ENABLERX 0xE8		/* Enable RX */
+#define DISABLERX 0xEA		/* Disable RX */
+#define SETEIE 0xEC		/* Enable Interrupts */
+#define CLREIE 0xEE		/* Disable Interrupts */
+
+/* Two byte instructions */
+#define RBSEL 0xC8		/* Read Bank Select */
+
+/* Three byte instructions */
+#define WGPRDPT 0x60		/* Write EGPRDPT */
+#define RGPRDPT 0x62		/* Read EGPRDPT */
+#define WRXRDPT 0x64		/* Write ERXRDPT */
+#define RRXRDPT 0x66		/* Read ERXRDPT */
+#define WUDARDPT 0x68		/* Write EUDARDPT */
+#define RUDARDPT 0x6A		/* Read EUDARDPT */
+#define WGPWRPT 0x6C		/* Write EGPWRPT */
+#define RGPWRPT 0x6E		/* Read EGPWRPT */
+#define WRXWRPT 0x70		/* Write ERXWRPT */
+#define RRXWRPT 0x72		/* Read ERXWRPT */
+#define WUDAWRPT 0x74		/* Write EUDAWRPT */
+#define RUDAWRPT 0x76		/* Read EUDAWRPT */
+
+/* n byte instructions */
+#define RCRCODE 0x00
+#define WCRCODE 0x40
+#define BFSCODE 0x80
+#define BFCCODE 0xA0
+#define RCR(addr) (RCRCODE | (addr & ADDR_MASK)) /* Read Control Register */
+#define WCR(addr) (WCRCODE | (addr & ADDR_MASK)) /* Write Control Register */
+#define RCRU 0x20		/* Read Control Register Unbanked */
+#define WCRU 0x22		/* Write Control Register Unbanked */
+#define BFS(addr) (BFSCODE | (addr & ADDR_MASK)) /* Bit Field Set */
+#define BFC(addr) (BFCCODE | (addr & ADDR_MASK)) /* Bit Field Clear */
+#define BFSU 0x24		/* Bit Field Set Unbanked */
+#define BFCU 0x26		/* Bit Field Clear Unbanked */
+#define RGPDATA 0x28		/* Read EGPDATA */
+#define WGPDATA 0x2A		/* Write EGPDATA */
+#define RRXDATA 0x2C		/* Read ERXDATA */
+#define WRXDATA 0x2E		/* Write ERXDATA */
+#define RUDADATA 0x30		/* Read EUDADATA */
+#define WUDADATA 0x32		/* Write EUDADATA */
+
+#define SFR_REG_COUNT	0xA0
+
+/* ENC424J600 Control Registers
+ * Control register definitions are a combination of address
+ * and bank number
+ * - Register address (bits 0-4)
+ * - Bank number (bits 5-6)
+ */
+#define ADDR_MASK 0x1F
+#define BANK_MASK 0x60
+#define BANK_SHIFT 5
+
+/* All-bank registers */
+#define EUDAST 0x16
+#define EUDAND 0x18
+#define ESTAT 0x1A
+#define EIR 0x1C
+#define ECON1 0x1E
+
+/* Bank 0 registers */
+#define ETXST (0x00 | 0x00)
+#define ETXLEN (0x02 | 0x00)
+#define ERXST (0x04 | 0x00)
+#define ERXTAIL (0x06 | 0x00)
+#define ERXHEAD (0x08 | 0x00)
+#define EDMAST (0x0A | 0x00)
+#define EDMALEN (0x0C | 0x00)
+#define EDMADST (0x0E | 0x00)
+#define EDMACS (0x10 | 0x00)
+#define ETXSTAT (0x12 | 0x00)
+#define ETXWIRE (0x14 | 0x00)
+
+/* Bank 1 registers */
+#define EHT1 (0x00 | 0x20)
+#define EHT2 (0x02 | 0x20)
+#define EHT3 (0x04 | 0x20)
+#define EHT4 (0x06 | 0x20)
+#define EPMM1 (0x08 | 0x20)
+#define EPMM2 (0x0A | 0x20)
+#define EPMM3 (0x0C | 0x20)
+#define EPMM4 (0x0E | 0x20)
+#define EPMCS (0x10 | 0x20)
+#define EPMO (0x12 | 0x20)
+#define ERXFCON (0x14 | 0x20)
+
+/* Bank 2 registers */
+#define MACON1 (0x00 | 0x40)
+#define MACON2 (0x02 | 0x40)
+#define MABBIPG (0x04 | 0x40)
+#define MAIPG (0x06 | 0x40)
+#define MACLCON (0x08 | 0x40)
+#define MAMXFL (0x0A | 0x40)
+#define MICMD (0x12 | 0x40)
+#define MIREGADR (0x14 | 0x40)
+
+/* Bank 3 registers */
+#define MAADR3 (0x00 | 0x60)
+#define MAADR2 (0x02 | 0x60)
+#define MAADR1 (0x04 | 0x60)
+#define MIWR (0x06 | 0x60)
+#define MIRD (0x08 | 0x60)
+#define MISTAT (0x0A | 0x60)
+#define EPAUS (0x0C | 0x60)
+#define ECON2 (0x0E | 0x60)
+#define ERXWM (0x10 | 0x60)
+#define EIE (0x12 | 0x60)
+#define EIDLED (0x14 | 0x60)
+
+/* Unbanked registers */
+#define EGPDATA (0x00 | 0x80)
+#define ERXDATA (0x02 | 0x80)
+#define EUDADATA (0x04 | 0x80)
+#define EGPRDPT (0x06 | 0x80)
+#define EGPWRPT (0x08 | 0x80)
+#define ERXRDPT (0x0A | 0x80)
+#define ERXWRPT (0x0C | 0x80)
+#define EUDARDPT (0x0E | 0x80)
+#define EUDAWRPT (0x10 | 0x80)
+
+
+/* Register bit definitions */
+/* ESTAT */
+#define INT (1 << 15)
+#define FCIDLE (1 << 14)
+#define RXBUSY (1 << 13)
+#define CLKRDY (1 << 12)
+#define PHYDPX (1 << 10)
+#define PHYLNK (1 << 8)
+
+/* EIR */
+#define CRYPTEN (1 << 15)
+#define MODEXIF (1 << 14)
+#define HASHIF (1 << 13)
+#define AESIF (1 << 12)
+#define LINKIF (1 << 11)
+#define PKTIF (1 << 6)
+#define DMAIF (1 << 5)
+#define TXIF (1 << 3)
+#define TXABTIF (1 << 2)
+#define RXABTIF (1 << 1)
+#define PCFULIF (1 << 0)
+
+/* ECON1 */
+#define MODEXST (1 << 15)
+#define HASHEN (1 << 14)
+#define HASHOP (1 << 13)
+#define HASHLST (1 << 12)
+#define AESST (1 << 11)
+#define AESOP1 (1 << 10)
+#define AESOP0 (1 << 9)
+#define PKTDEC (1 << 8)
+#define FCOP1 (1 << 7)
+#define FCOP0 (1 << 6)
+#define DMAST (1 << 5)
+#define DMACPY (1 << 4)
+#define DMACSSD (1 << 3)
+#define DMANOCS (1 << 2)
+#define TXRTS (1 << 1)
+#define RXEN (1 << 0)
+
+/* ETXSTAT */
+#define LATECOL (1 << 10)
+#define MAXCOL (1 << 9)
+#define EXDEFER (1 << 8)
+#define ETXSTATL_DEFER (1 << 7)
+#define CRCBAD (1 << 4)
+#define COLCNT_MASK 0xF
+
+/* ERXFCON */
+#define HTEN (1 << 15)
+#define MPEN (1 << 14)
+#define NOTPM (1 << 12)
+#define PMEN3 (1 << 11)
+#define PMEN2 (1 << 10)
+#define PMEN1 (1 << 9)
+#define PMEN0 (1 << 8)
+#define CRCEEN (1 << 7)
+#define CRCEN (1 << 6)
+#define RUNTEEN (1 << 5)
+#define RUNTEN (1 << 4)
+#define UCEN (1 << 3)
+#define NOTMEEN (1 << 2)
+#define MCEN (1 << 1)
+#define BCEN (1 << 0)
+
+/* MACON1 */
+#define LOOPBK (1 << 4)
+#define RXPAUS (1 << 2)
+#define PASSALL (1 << 1)
+
+/* MACON2 */
+#define MACON2_DEFER (1 << 14)
+#define BPEN (1 << 13)
+#define NOBKOFF (1 << 12)
+#define PADCFG2 (1 << 7)
+#define PADCFG1 (1 << 6)
+#define PADCFG0 (1 << 5)
+#define TXCRCEN (1 << 4)
+#define PHDREN (1 << 3)
+#define HFRMEN (1 << 2)
+#define MACON2_RSV1 (1 << 1)
+#define FULDPX (1 << 0)
+
+/* MAIPG */
+/* The value of the high byte is given by the reserved bits;
+ * the value of the low byte is the recommended setting of the
+ * IPG parameter.
+ */
+#define MAIPGH_VAL 0x0C
+#define MAIPGL_VAL 0x12
+
+/* MIREGADRH */
+#define MIREGADR_VAL (1 << 8)
+
+/* MIREGADRL */
+#define PHREG_MASK 0x1F
+
+/* MICMD */
+#define MIISCAN (1 << 1)
+#define MIIRD (1 << 0)
+
+/* MISTAT */
+#define NVALID (1 << 2)
+#define SCAN (1 << 1)
+#define BUSY (1 << 0)
+
+/* ECON2 */
+#define ETHEN (1 << 15)
+#define STRCH (1 << 14)
+#define TXMAC (1 << 13)
+#define SHA1MD5 (1 << 12)
+#define COCON3 (1 << 11)
+#define COCON2 (1 << 10)
+#define COCON1 (1 << 9)
+#define COCON0 (1 << 8)
+#define AUTOFC (1 << 7)
+#define TXRST (1 << 6)
+#define RXRST (1 << 5)
+#define ETHRST (1 << 4)
+#define MODLEN1 (1 << 3)
+#define MODLEN0 (1 << 2)
+#define AESLEN1 (1 << 1)
+#define AESLEN0 (1 << 0)
+
+/* EIE */
+#define INTIE (1 << 15)
+#define MODEXIE (1 << 14)
+#define HASHIE (1 << 13)
+#define AESIE (1 << 12)
+#define LINKIE (1 << 11)
+#define PKTIE (1 << 6)
+#define DMAIE (1 << 5)
+#define TXIE (1 << 3)
+#define TXABTIE (1 << 2)
+#define RXABTIE (1 << 1)
+#define PCFULIE (1 << 0)
+
+/* EIDLED */
+#define LACFG3 (1 << 15)
+#define LACFG2 (1 << 14)
+#define LACFG1 (1 << 13)
+#define LACFG0 (1 << 12)
+#define LBCFG3 (1 << 11)
+#define LBCFG2 (1 << 10)
+#define LBCFG1 (1 << 9)
+#define LBCFG0 (1 << 8)
+#define DEVID_SHIFT 5
+#define DEVID_MASK (0x7 << DEVID_SHIFT)
+#define REVID_SHIFT 0
+#define REVID_MASK (0x1F << REVID_SHIFT)
+
+/* PHY registers */
+#define PHCON1 0x00
+#define PHSTAT1 0x01
+#define PHANA 0x04
+#define PHANLPA 0x05
+#define PHANE 0x06
+#define PHCON2 0x11
+#define PHSTAT2 0x1B
+#define PHSTAT3 0x1F
+
+/* PHCON1 */
+#define PRST (1 << 15)
+#define PLOOPBK (1 << 14)
+#define SPD100 (1 << 13)
+#define ANEN (1 << 12)
+#define PSLEEP (1 << 11)
+#define RENEG (1 << 9)
+#define PFULDPX (1 << 8)
+
+/* PHSTAT1 */
+#define FULL100 (1 << 14)
+#define HALF100 (1 << 13)
+#define FULL10 (1 << 12)
+#define HALF10 (1 << 11)
+#define ANDONE (1 << 5)
+#define LRFAULT (1 << 4)
+#define ANABLE (1 << 3)
+#define LLSTAT (1 << 2)
+#define EXTREGS (1 << 0)
+
+/* PHSTAT2 */
+#define PLRITY (1 << 4)
+
+/* PHSTAT3 */
+#define PHY3SPD100 (1 << 3)
+#define PHY3DPX (1 << 4)
+#define SPDDPX_SHIFT 2
+#define SPDDPX_MASK (0x7 << SPDDPX_SHIFT)
+
+/* PHANA */
+/* Default value for PHY initialization */
+#define PHANA_DEFAULT 0x05E1
+
+/* PHANE */
+#define PDFLT (1 << 4)
+#define LPARCD (1 << 1)
+#define LPANABL (1 << 0)
+
+#define EUDAST_TEST_VAL 0x1234
+
+#define TSV_SIZE 7
+
+#define ENCX24J600_DEV_ID 0x1
+
+/* Configuration */
+
+/* LED is on when the link is present and is driven low
+ * briefly when a packet is transmitted or received
+ */
+#define LED_A_SETTINGS 0xC
+
+/* LED is on if the link is in 100 Mbps mode */
+#define LED_B_SETTINGS 0x8
+
+/* Maximum Ethernet frame length.
+ * Programmed into MAMXFL and used as an upper bound when
+ * validating the length of received frames.
+ */
+#define MAX_FRAMELEN 1518
+
+/* Size in bytes of the receive buffer in enc424j600.
+ * Must be word aligned (even).
+ */
+#define RX_BUFFER_SIZE (15 * MAX_FRAMELEN)
+
+/* Start of the general purpose area in sram */
+#define SRAM_GP_START 0x0
+
+/* SRAM size */
+#define SRAM_SIZE 0x6000
+
+/* Start of the receive buffer */
+#define ERXST_VAL (SRAM_SIZE - RX_BUFFER_SIZE)
+
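+/* Bit positions within the receive status vector (RSV). RSV_BITMASK()
+ * maps a bit number to the corresponding bit in rsv.rxstat, which holds
+ * RSV bits 16 and up.
+ */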
+#define RSV_RXLONGEVDROPEV	16
+#define RSV_CARRIEREV		18
+#define RSV_CRCERROR		20
+#define RSV_LENCHECKERR		21
+#define RSV_LENOUTOFRANGE	22
+#define RSV_RXOK		23
+#define RSV_RXMULTICAST		24
+#define RSV_RXBROADCAST		25
+#define RSV_DRIBBLENIBBLE	26
+#define RSV_RXCONTROLFRAME	27
+#define RSV_RXPAUSEFRAME	28
+#define RSV_RXUNKNOWNOPCODE	29
+#define RSV_RXTYPEVLAN		30
+
+#define RSV_RUNTFILTERMATCH	31
+#define RSV_NOTMEFILTERMATCH	32
+#define RSV_HASHFILTERMATCH	33
+#define RSV_MAGICPKTFILTERMATCH	34
+#define RSV_PTRNMTCHFILTERMATCH	35
+#define RSV_UNICASTFILTERMATCH	36
+
+#define RSV_SIZE		8
+#define RSV_BITMASK(x)		(1 << ((x) - 16))
+#define RSV_GETBIT(x, y)	(((x) & RSV_BITMASK(y)) ? 1 : 0)
+
+struct rsv {
+	u16 next_packet;
+	u16 len;
+	u32 rxstat;
+};
+
+/* Put RX buffer at 0 as suggested by the Errata datasheet */
+
+#define RXSTART_INIT		ERXST_VAL
+#define RXEND_INIT		0x5FFF
+
+int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
+				size_t count);
+int regmap_encx24j600_spi_read(void *context, u8 reg, u8 *data, size_t count);
+
+
+#endif
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index becbb5f..a10c928 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -552,6 +552,7 @@
 	{ .compatible = "moxa,moxart-mac" },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, moxart_mac_match);
 
 static struct platform_driver moxart_mac_driver = {
 	.probe	= moxart_mac_probe,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 06bcc73..d6696cf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -536,6 +536,7 @@
 	u8 extend_lb_time;
 	u8 phys_port_id[ETH_ALEN];
 	u8 lb_mode;
+	u8 vxlan_port_count;
 	u16 vxlan_port;
 	struct device *hwmon_dev;
 	u32 post_mode;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 8b08b20..d448145 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -483,11 +483,17 @@
 	/* Adapter supports only one VXLAN port. Use very first port
 	 * for enabling offload
 	 */
-	if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port)
+	if (!qlcnic_encap_rx_offload(adapter))
 		return;
+	if (!ahw->vxlan_port_count) {
+		ahw->vxlan_port_count = 1;
+		ahw->vxlan_port = ntohs(port);
+		adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
+		return;
+	}
+	if (ahw->vxlan_port == ntohs(port))
+		ahw->vxlan_port_count++;
 
-	ahw->vxlan_port = ntohs(port);
-	adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
 }
 
 static void qlcnic_del_vxlan_port(struct net_device *netdev,
@@ -496,11 +502,13 @@
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 
-	if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port ||
+	if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
 	    (ahw->vxlan_port != ntohs(port)))
 		return;
 
-	adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
+	ahw->vxlan_port_count--;
+	if (!ahw->vxlan_port_count)
+		adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
 }
 
 static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index d79e33b..deae10d 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -157,6 +157,7 @@
 	NWayAdvert	= 0x66, /* MII ADVERTISE */
 	NWayLPAR	= 0x68, /* MII LPA */
 	NWayExpansion	= 0x6A, /* MII Expansion */
+	TxDmaOkLowDesc  = 0x82, /* Low 16 bit address of a Tx descriptor. */
 	Config5		= 0xD8,	/* Config5 */
 	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
 	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
@@ -174,7 +175,7 @@
 	LastFrag	= (1 << 28), /* Final segment of a packet */
 	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
 	MSSShift	= 16,	     /* MSS value position */
-	MSSMask		= 0xfff,     /* MSS value: 11 bits */
+	MSSMask		= 0x7ff,     /* MSS value: 11 bits */
 	TxError		= (1 << 23), /* Tx error summary */
 	RxError		= (1 << 20), /* Rx error summary */
 	IPCS		= (1 << 18), /* Calculate IP checksum */
@@ -341,6 +342,7 @@
 	unsigned		tx_tail;
 	struct cp_desc		*tx_ring;
 	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
+	u32			tx_opts[CP_TX_RING_SIZE];
 
 	unsigned		rx_buf_sz;
 	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
@@ -665,7 +667,7 @@
 		BUG_ON(!skb);
 
 		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
-				 le32_to_cpu(txd->opts1) & 0xffff,
+				 cp->tx_opts[tx_tail] & 0xffff,
 				 PCI_DMA_TODEVICE);
 
 		if (status & LastFrag) {
@@ -733,7 +735,7 @@
 {
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned entry;
-	u32 eor, flags;
+	u32 eor, opts1;
 	unsigned long intr_flags;
 	__le32 opts2;
 	int mss = 0;
@@ -752,7 +754,28 @@
 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 	mss = skb_shinfo(skb)->gso_size;
 
+	if (mss > MSSMask) {
+		WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n",
+			  mss);
+		goto out_dma_error;
+	}
+
 	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
+	opts1 = DescOwn;
+	if (mss)
+		opts1 |= LargeSend | (mss << MSSShift);
+	else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		const struct iphdr *ip = ip_hdr(skb);
+		if (ip->protocol == IPPROTO_TCP)
+			opts1 |= IPCS | TCPCS;
+		else if (ip->protocol == IPPROTO_UDP)
+			opts1 |= IPCS | UDPCS;
+		else {
+			WARN_ONCE(1,
+				  "Net bug: asked to checksum invalid Legacy IP packet\n");
+			goto out_dma_error;
+		}
+	}
 
 	if (skb_shinfo(skb)->nr_frags == 0) {
 		struct cp_desc *txd = &cp->tx_ring[entry];
@@ -768,31 +791,20 @@
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
 
-		flags = eor | len | DescOwn | FirstFrag | LastFrag;
+		opts1 |= eor | len | FirstFrag | LastFrag;
 
-		if (mss)
-			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
-		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			const struct iphdr *ip = ip_hdr(skb);
-			if (ip->protocol == IPPROTO_TCP)
-				flags |= IPCS | TCPCS;
-			else if (ip->protocol == IPPROTO_UDP)
-				flags |= IPCS | UDPCS;
-			else
-				WARN_ON(1);	/* we need a WARN() */
-		}
-
-		txd->opts1 = cpu_to_le32(flags);
+		txd->opts1 = cpu_to_le32(opts1);
 		wmb();
 
 		cp->tx_skb[entry] = skb;
-		entry = NEXT_TX(entry);
+		cp->tx_opts[entry] = opts1;
+		netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+			  entry, skb->len);
 	} else {
 		struct cp_desc *txd;
-		u32 first_len, first_eor;
+		u32 first_len, first_eor, ctrl;
 		dma_addr_t first_mapping;
 		int frag, first_entry = entry;
-		const struct iphdr *ip = ip_hdr(skb);
 
 		/* We must give this initial chunk to the device last.
 		 * Otherwise we could race with the device.
@@ -805,14 +817,14 @@
 			goto out_dma_error;
 
 		cp->tx_skb[entry] = skb;
-		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
 			u32 len;
-			u32 ctrl;
 			dma_addr_t mapping;
 
+			entry = NEXT_TX(entry);
+
 			len = skb_frag_size(this_frag);
 			mapping = dma_map_single(&cp->pdev->dev,
 						 skb_frag_address(this_frag),
@@ -824,19 +836,7 @@
 
 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 
-			ctrl = eor | len | DescOwn;
-
-			if (mss)
-				ctrl |= LargeSend |
-					((mss & MSSMask) << MSSShift);
-			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-				if (ip->protocol == IPPROTO_TCP)
-					ctrl |= IPCS | TCPCS;
-				else if (ip->protocol == IPPROTO_UDP)
-					ctrl |= IPCS | UDPCS;
-				else
-					BUG();
-			}
+			ctrl = opts1 | eor | len;
 
 			if (frag == skb_shinfo(skb)->nr_frags - 1)
 				ctrl |= LastFrag;
@@ -849,8 +849,8 @@
 			txd->opts1 = cpu_to_le32(ctrl);
 			wmb();
 
+			cp->tx_opts[entry] = ctrl;
 			cp->tx_skb[entry] = skb;
-			entry = NEXT_TX(entry);
 		}
 
 		txd = &cp->tx_ring[first_entry];
@@ -858,27 +858,17 @@
 		txd->addr = cpu_to_le64(first_mapping);
 		wmb();
 
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (ip->protocol == IPPROTO_TCP)
-				txd->opts1 = cpu_to_le32(first_eor | first_len |
-							 FirstFrag | DescOwn |
-							 IPCS | TCPCS);
-			else if (ip->protocol == IPPROTO_UDP)
-				txd->opts1 = cpu_to_le32(first_eor | first_len |
-							 FirstFrag | DescOwn |
-							 IPCS | UDPCS);
-			else
-				BUG();
-		} else
-			txd->opts1 = cpu_to_le32(first_eor | first_len |
-						 FirstFrag | DescOwn);
+		ctrl = opts1 | first_eor | first_len | FirstFrag;
+		txd->opts1 = cpu_to_le32(ctrl);
 		wmb();
+
+		cp->tx_opts[first_entry] = ctrl;
+		netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
+			  first_entry, entry, skb->len);
 	}
-	cp->tx_head = entry;
+	cp->tx_head = NEXT_TX(entry);
 
 	netdev_sent_queue(dev, skb->len);
-	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
-		  entry, skb->len);
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
 
@@ -1115,6 +1105,7 @@
 {
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
 	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
+	memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
 
 	cp_init_rings_index(cp);
 
@@ -1151,7 +1142,7 @@
 			desc = cp->rx_ring + i;
 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(cp->rx_skb[i]);
+			dev_kfree_skb_any(cp->rx_skb[i]);
 		}
 	}
 
@@ -1164,7 +1155,7 @@
 					 le32_to_cpu(desc->opts1) & 0xffff,
 					 PCI_DMA_TODEVICE);
 			if (le32_to_cpu(desc->opts1) & LastFrag)
-				dev_kfree_skb(skb);
+				dev_kfree_skb_any(skb);
 			cp->dev->stats.tx_dropped++;
 		}
 	}
@@ -1172,6 +1163,7 @@
 
 	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+	memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
 
 	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
 	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
@@ -1249,7 +1241,7 @@
 {
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned long flags;
-	int rc;
+	int rc, i;
 
 	netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
 		    cpr8(Cmd), cpr16(CpCmd),
@@ -1257,13 +1249,26 @@
 
 	spin_lock_irqsave(&cp->lock, flags);
 
+	netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
+		  cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
+	for (i = 0; i < CP_TX_RING_SIZE; i++) {
+		netif_dbg(cp, tx_err, cp->dev,
+			  "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
+			  i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
+			  cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
+			  le64_to_cpu(cp->tx_ring[i].addr),
+			  cp->tx_skb[i]);
+	}
+
 	cp_stop_hw(cp);
 	cp_clean_rings(cp);
 	rc = cp_init_rings(cp);
 	cp_start_hw(cp);
-	cp_enable_irq(cp);
+	__cp_set_rx_mode(dev);
+	cpw16_f(IntrMask, cp_norx_intr_mask);
 
 	netif_wake_queue(dev);
+	napi_schedule_irqoff(&cp->napi);
 
 	spin_unlock_irqrestore(&cp->lock, flags);
 }
@@ -1853,6 +1858,15 @@
 	pci_set_power_state (cp->pdev, PCI_D3hot);
 }
 
+static netdev_features_t cp_features_check(struct sk_buff *skb,
+					   struct net_device *dev,
+					   netdev_features_t features)
+{
+	if (skb_shinfo(skb)->gso_size > MSSMask)
+		features &= ~NETIF_F_TSO;
+
+	return vlan_features_check(skb, features);
+}
 static const struct net_device_ops cp_netdev_ops = {
 	.ndo_open		= cp_open,
 	.ndo_stop		= cp_close,
@@ -1865,6 +1879,7 @@
 	.ndo_tx_timeout		= cp_tx_timeout,
 	.ndo_set_features	= cp_set_features,
 	.ndo_change_mtu		= cp_change_mtu,
+	.ndo_features_check	= cp_features_check,
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= cp_poll_controller,
@@ -1984,12 +1999,12 @@
 	dev->ethtool_ops = &cp_ethtool_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
-	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
 	if (pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;
 
-	/* disabled by default until verified */
 	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 2b32e0c..b4f2123 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6081,7 +6081,7 @@
 {
 	void __iomem *ioaddr = tp->mmio_addr;
 	struct pci_dev *pdev = tp->pci_dev;
-	u16 rg_saw_cnt;
+	int rg_saw_cnt;
 	u32 data;
 	static const struct ephy_info e_info_8168h_1[] = {
 		{ 0x1e, 0x0800,	0x0001 },
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index a157aaa..0623fff9 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -766,6 +766,11 @@
 	struct ravb_ptp_perout perout[N_PER_OUT];
 };
 
+enum ravb_chip_id {
+	RCAR_GEN2,
+	RCAR_GEN3,
+};
+
 struct ravb_private {
 	struct net_device *ndev;
 	struct platform_device *pdev;
@@ -806,6 +811,8 @@
 	int msg_enable;
 	int speed;
 	int duplex;
+	int emac_irq;
+	enum ravb_chip_id chip_id;
 
 	unsigned no_avb_link:1;
 	unsigned avb_link_active_low:1;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 450899e..8cc5ec5 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -201,7 +201,7 @@
 	if (priv->rx_ring[q]) {
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
 			    (priv->num_rx_ring[q] + 1);
-		dma_free_coherent(NULL, ring_size, priv->rx_ring[q],
+		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
 				  priv->rx_desc_dma[q]);
 		priv->rx_ring[q] = NULL;
 	}
@@ -209,7 +209,7 @@
 	if (priv->tx_ring[q]) {
 		ring_size = sizeof(struct ravb_tx_desc) *
 			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
-		dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
+		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
 	}
@@ -240,13 +240,13 @@
 		rx_desc = &priv->rx_ring[q][i];
 		/* The size of the buffer should be on 16-byte boundary. */
 		rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
-		dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
+		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 					  ALIGN(PKT_BUF_SZ, 16),
 					  DMA_FROM_DEVICE);
 		/* We just set the data size to 0 for a failed mapping which
 		 * should prevent DMA from happening...
 		 */
-		if (dma_mapping_error(&ndev->dev, dma_addr))
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
 			rx_desc->ds_cc = cpu_to_le16(0);
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
@@ -309,7 +309,7 @@
 
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
-	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 					      &priv->rx_desc_dma[q],
 					      GFP_KERNEL);
 	if (!priv->rx_ring[q])
@@ -320,7 +320,7 @@
 	/* Allocate all TX descriptors. */
 	ring_size = sizeof(struct ravb_tx_desc) *
 		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
-	priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 					      &priv->tx_desc_dma[q],
 					      GFP_KERNEL);
 	if (!priv->tx_ring[q])
@@ -443,7 +443,7 @@
 		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 		/* Free the original skb. */
 		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 					 size, DMA_TO_DEVICE);
 			/* Last packet descriptor? */
 			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
@@ -546,7 +546,7 @@
 
 			skb = priv->rx_skb[q][entry];
 			priv->rx_skb[q][entry] = NULL;
-			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 					 ALIGN(PKT_BUF_SZ, 16),
 					 DMA_FROM_DEVICE);
 			get_ts &= (q == RAVB_NC) ?
@@ -586,14 +586,14 @@
 			if (!skb)
 				break;	/* Better luck next round. */
 			ravb_set_buffer_align(skb);
-			dma_addr = dma_map_single(&ndev->dev, skb->data,
+			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 						  le16_to_cpu(desc->ds_cc),
 						  DMA_FROM_DEVICE);
 			skb_checksum_none_assert(skb);
 			/* We just set the data size to 0 for a failed mapping
 			 * which should prevent DMA  from happening...
 			 */
-			if (dma_mapping_error(&ndev->dev, dma_addr))
+			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 				desc->ds_cc = cpu_to_le16(0);
 			desc->dptr = cpu_to_le32(dma_addr);
 			priv->rx_skb[q][entry] = skb;
@@ -889,6 +889,22 @@
 		return -ENOENT;
 	}
 
+	/* This driver only supports 10/100 Mbit/s speeds on Gen3
+	 * at this time.
+	 */
+	if (priv->chip_id == RCAR_GEN3) {
+		int err;
+
+		err = phy_set_max_speed(phydev, SPEED_100);
+		if (err) {
+			netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
+			phy_disconnect(phydev);
+			return err;
+		}
+
+		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
+	}
+
 	netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
 		    phydev->addr, phydev->irq, phydev->drv->name);
 
@@ -1197,6 +1213,15 @@
 		goto out_napi_off;
 	}
 
+	if (priv->chip_id == RCAR_GEN3) {
+		error = request_irq(priv->emac_irq, ravb_interrupt,
+				    IRQF_SHARED, ndev->name, ndev);
+		if (error) {
+			netdev_err(ndev, "cannot request IRQ\n");
+			goto out_free_irq;
+		}
+	}
+
 	/* Device init */
 	error = ravb_dmac_init(ndev);
 	if (error)
@@ -1220,6 +1245,7 @@
 	ravb_ptp_stop(ndev);
 out_free_irq:
 	free_irq(ndev->irq, ndev);
+	free_irq(priv->emac_irq, ndev);
 out_napi_off:
 	napi_disable(&priv->napi[RAVB_NC]);
 	napi_disable(&priv->napi[RAVB_BE]);
@@ -1300,8 +1326,8 @@
 		 entry / NUM_TX_DESC * DPTR_ALIGN;
 	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
 	memcpy(buffer, skb->data, len);
-	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&ndev->dev, dma_addr))
+	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(ndev->dev.parent, dma_addr))
 		goto drop;
 
 	desc = &priv->tx_ring[q][entry];
@@ -1310,8 +1336,8 @@
 
 	buffer = skb->data + len;
 	len = skb->len - len;
-	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&ndev->dev, dma_addr))
+	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(ndev->dev.parent, dma_addr))
 		goto unmap;
 
 	desc++;
@@ -1323,7 +1349,7 @@
 		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
 		if (!ts_skb) {
 			desc--;
-			dma_unmap_single(&ndev->dev, dma_addr, len,
+			dma_unmap_single(ndev->dev.parent, dma_addr, len,
 					 DMA_TO_DEVICE);
 			goto unmap;
 		}
@@ -1358,7 +1384,7 @@
 	return NETDEV_TX_OK;
 
 unmap:
-	dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
 drop:
 	dev_kfree_skb_any(skb);
@@ -1625,10 +1651,20 @@
 	return 0;
 }
 
+static const struct of_device_id ravb_match_table[] = {
+	{ .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
+	{ .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
+	{ .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ravb_match_table);
+
 static int ravb_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
+	const struct of_device_id *match;
 	struct ravb_private *priv;
+	enum ravb_chip_id chip_id;
 	struct net_device *ndev;
 	int error, irq, q;
 	struct resource *res;
@@ -1657,7 +1693,14 @@
 	/* The Ether-specific entries in the device structure. */
 	ndev->base_addr = res->start;
 	ndev->dma = -1;
-	irq = platform_get_irq(pdev, 0);
+
+	match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev);
+	chip_id = (enum ravb_chip_id)match->data;
+
+	if (chip_id == RCAR_GEN3)
+		irq = platform_get_irq_byname(pdev, "ch22");
+	else
+		irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 		error = irq;
 		goto out_release;
@@ -1688,6 +1731,17 @@
 	priv->avb_link_active_low =
 		of_property_read_bool(np, "renesas,ether-link-active-low");
 
+	if (chip_id == RCAR_GEN3) {
+		irq = platform_get_irq_byname(pdev, "ch24");
+		if (irq < 0) {
+			error = irq;
+			goto out_release;
+		}
+		priv->emac_irq = irq;
+	}
+
+	priv->chip_id = chip_id;
+
 	/* Set function */
 	ndev->netdev_ops = &ravb_netdev_ops;
 	ndev->ethtool_ops = &ravb_ethtool_ops;
@@ -1708,7 +1762,7 @@
 
 	/* Allocate descriptor base address table */
 	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
-	priv->desc_bat = dma_alloc_coherent(NULL, priv->desc_bat_size,
+	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
 					    &priv->desc_bat_dma, GFP_KERNEL);
 	if (!priv->desc_bat) {
 		dev_err(&ndev->dev,
@@ -1763,7 +1817,7 @@
 	netif_napi_del(&priv->napi[RAVB_BE]);
 	ravb_mdio_release(priv);
 out_dma_free:
-	dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
 			  priv->desc_bat_dma);
 out_release:
 	if (ndev)
@@ -1779,7 +1833,7 @@
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct ravb_private *priv = netdev_priv(ndev);
 
-	dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
 			  priv->desc_bat_dma);
 	/* Set reset mode */
 	ravb_write(ndev, CCC_OPC_RESET, CCC);
@@ -1818,13 +1872,6 @@
 #define RAVB_PM_OPS NULL
 #endif
 
-static const struct of_device_id ravb_match_table[] = {
-	{ .compatible = "renesas,etheravb-r8a7790" },
-	{ .compatible = "renesas,etheravb-r8a7794" },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, ravb_match_table);
-
 static struct platform_driver ravb_driver = {
 	.probe		= ravb_probe,
 	.remove		= ravb_remove,
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 34ac41a..eafa907 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -152,8 +152,9 @@
 	struct hlist_node entry;
 	u32 key_crc32; /* key */
 	bool learned;
+	unsigned long touched;
 	struct rocker_fdb_tbl_key {
-		u32 pport;
+		struct rocker_port *rocker_port;
 		u8 addr[ETH_ALEN];
 		__be16 vlan_id;
 	} key;
@@ -220,13 +221,13 @@
 	__be16 internal_vlan_id;
 	int stp_state;
 	u32 brport_flags;
+	unsigned long ageing_time;
 	bool ctrls[ROCKER_CTRL_MAX];
 	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
 	struct napi_struct napi_tx;
 	struct napi_struct napi_rx;
 	struct rocker_dma_ring_info tx_ring;
 	struct rocker_dma_ring_info rx_ring;
-	struct list_head trans_mem;
 };
 
 struct rocker {
@@ -246,6 +247,7 @@
 	u64 flow_tbl_next_cookie;
 	DECLARE_HASHTABLE(group_tbl, 16);
 	spinlock_t group_tbl_lock;		/* for group tbl accesses */
+	struct timer_list fdb_cleanup_timer;
 	DECLARE_HASHTABLE(fdb_tbl, 16);
 	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
 	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
@@ -340,74 +342,63 @@
 #define ROCKER_OP_FLAG_REFRESH		BIT(3)
 
 static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
-				     enum switchdev_trans trans, int flags,
+				     struct switchdev_trans *trans, int flags,
 				     size_t size)
 {
-	struct list_head *elem = NULL;
+	struct switchdev_trans_item *elem = NULL;
 	gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
 			  GFP_ATOMIC : GFP_KERNEL;
 
 	/* If in transaction prepare phase, allocate the memory
-	 * and enqueue it on a per-port list.  If in transaction
-	 * commit phase, dequeue the memory from the per-port list
+	 * and enqueue it on a transaction.  If in transaction
+	 * commit phase, dequeue the memory from the transaction
 	 * rather than re-allocating the memory.  The idea is the
 	 * driver code paths for prepare and commit are identical
 	 * so the memory allocated in the prepare phase is the
 	 * memory used in the commit phase.
 	 */
 
-	switch (trans) {
-	case SWITCHDEV_TRANS_PREPARE:
+	if (!trans) {
+		elem = kzalloc(size + sizeof(*elem), gfp_flags);
+	} else if (switchdev_trans_ph_prepare(trans)) {
 		elem = kzalloc(size + sizeof(*elem), gfp_flags);
 		if (!elem)
 			return NULL;
-		list_add_tail(elem, &rocker_port->trans_mem);
-		break;
-	case SWITCHDEV_TRANS_COMMIT:
-		BUG_ON(list_empty(&rocker_port->trans_mem));
-		elem = rocker_port->trans_mem.next;
-		list_del_init(elem);
-		break;
-	case SWITCHDEV_TRANS_NONE:
-		elem = kzalloc(size + sizeof(*elem), gfp_flags);
-		if (elem)
-			INIT_LIST_HEAD(elem);
-		break;
-	default:
-		break;
+		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
+	} else {
+		elem = switchdev_trans_item_dequeue(trans);
 	}
 
 	return elem ? elem + 1 : NULL;
 }
 
 static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
-				 enum switchdev_trans trans, int flags,
+				 struct switchdev_trans *trans, int flags,
 				 size_t size)
 {
 	return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
 }
 
 static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
-				 enum switchdev_trans trans, int flags,
+				 struct switchdev_trans *trans, int flags,
 				 size_t n, size_t size)
 {
 	return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
 }
 
-static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
+static void rocker_port_kfree(struct switchdev_trans *trans, const void *mem)
 {
-	struct list_head *elem;
+	struct switchdev_trans_item *elem;
 
 	/* Frees are ignored if in transaction prepare phase.  The
 	 * memory remains queued on the transaction until freed in the
 	 * commit phase.
 	 */
 
-	if (trans == SWITCHDEV_TRANS_PREPARE)
+	if (switchdev_trans_ph_prepare(trans))
 		return;
 
-	elem = (struct list_head *)mem - 1;
-	BUG_ON(!list_empty(elem));
+	elem = (struct switchdev_trans_item *) mem - 1;
 	kfree(elem);
 }
 
@@ -430,7 +421,7 @@
 }
 
 static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
-					      enum switchdev_trans trans,
+					      struct switchdev_trans *trans,
 					      int flags)
 {
 	struct rocker_wait *wait;
@@ -442,7 +433,7 @@
 	return wait;
 }
 
-static void rocker_wait_destroy(enum switchdev_trans trans,
+static void rocker_wait_destroy(struct switchdev_trans *trans,
 				struct rocker_wait *wait)
 {
 	rocker_port_kfree(trans, wait);
@@ -1408,7 +1399,7 @@
 		wait = rocker_desc_cookie_ptr_get(desc_info);
 		if (wait->nowait) {
 			rocker_desc_gen_clear(desc_info);
-			rocker_wait_destroy(SWITCHDEV_TRANS_NONE, wait);
+			rocker_wait_destroy(NULL, wait);
 		} else {
 			rocker_wait_wake_up(wait);
 		}
@@ -1463,7 +1454,7 @@
 }
 
 static int rocker_port_fdb(struct rocker_port *rocker_port,
-			   enum switchdev_trans trans,
+			   struct switchdev_trans *trans,
 			   const unsigned char *addr,
 			   __be16 vlan_id, int flags);
 
@@ -1496,8 +1487,7 @@
 	    rocker_port->stp_state != BR_STATE_FORWARDING)
 		return 0;
 
-	return rocker_port_fdb(rocker_port, SWITCHDEV_TRANS_NONE,
-			       addr, vlan_id, flags);
+	return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
 }
 
 static int rocker_event_process(const struct rocker *rocker,
@@ -1582,7 +1572,7 @@
 				    void *priv);
 
 static int rocker_cmd_exec(struct rocker_port *rocker_port,
-			   enum switchdev_trans trans, int flags,
+			   struct switchdev_trans *trans, int flags,
 			   rocker_cmd_prep_cb_t prepare, void *prepare_priv,
 			   rocker_cmd_proc_cb_t process, void *process_priv)
 {
@@ -1615,7 +1605,7 @@
 
 	rocker_desc_cookie_ptr_set(desc_info, wait);
 
-	if (trans != SWITCHDEV_TRANS_PREPARE)
+	if (!switchdev_trans_ph_prepare(trans))
 		rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
 
 	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
@@ -1623,7 +1613,7 @@
 	if (nowait)
 		return 0;
 
-	if (trans != SWITCHDEV_TRANS_PREPARE)
+	if (!switchdev_trans_ph_prepare(trans))
 		if (!rocker_wait_event_timeout(wait, HZ / 10))
 			return -EIO;
 
@@ -1875,7 +1865,7 @@
 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
 						struct ethtool_cmd *ecmd)
 {
-	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+	return rocker_cmd_exec(rocker_port, NULL, 0,
 			       rocker_cmd_get_port_settings_prep, NULL,
 			       rocker_cmd_get_port_settings_ethtool_proc,
 			       ecmd);
@@ -1884,7 +1874,7 @@
 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
 						unsigned char *macaddr)
 {
-	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+	return rocker_cmd_exec(rocker_port, NULL, 0,
 			       rocker_cmd_get_port_settings_prep, NULL,
 			       rocker_cmd_get_port_settings_macaddr_proc,
 			       macaddr);
@@ -1893,7 +1883,7 @@
 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
 						struct ethtool_cmd *ecmd)
 {
-	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+	return rocker_cmd_exec(rocker_port, NULL, 0,
 			       rocker_cmd_set_port_settings_ethtool_prep,
 			       ecmd, NULL, NULL);
 }
@@ -1901,7 +1891,7 @@
 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
 						unsigned char *macaddr)
 {
-	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+	return rocker_cmd_exec(rocker_port, NULL, 0,
 			       rocker_cmd_set_port_settings_macaddr_prep,
 			       macaddr, NULL, NULL);
 }
@@ -1909,13 +1899,13 @@
 static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
 					    int mtu)
 {
-	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+	return rocker_cmd_exec(rocker_port, NULL, 0,
 			       rocker_cmd_set_port_settings_mtu_prep,
 			       &mtu, NULL, NULL);
 }
 
 static int rocker_port_set_learning(struct rocker_port *rocker_port,
-				    enum switchdev_trans trans)
+				    struct switchdev_trans *trans)
 {
 	return rocker_cmd_exec(rocker_port, trans, 0,
 			       rocker_cmd_set_port_learning_prep,
@@ -2433,7 +2423,7 @@
 }
 
 static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
-			       enum switchdev_trans trans, int flags,
+			       struct switchdev_trans *trans, int flags,
 			       struct rocker_flow_tbl_entry *match)
 {
 	struct rocker *rocker = rocker_port->rocker;
@@ -2449,7 +2439,7 @@
 
 	if (found) {
 		match->cookie = found->cookie;
-		if (trans != SWITCHDEV_TRANS_PREPARE)
+		if (!switchdev_trans_ph_prepare(trans))
 			hash_del(&found->entry);
 		rocker_port_kfree(trans, found);
 		found = match;
@@ -2460,7 +2450,7 @@
 		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
 	}
 
-	if (trans != SWITCHDEV_TRANS_PREPARE)
+	if (!switchdev_trans_ph_prepare(trans))
 		hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
 
 	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
@@ -2470,7 +2460,7 @@
 }
 
 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
-			       enum switchdev_trans trans, int flags,
+			       struct switchdev_trans *trans, int flags,
 			       struct rocker_flow_tbl_entry *match)
 {
 	struct rocker *rocker = rocker_port->rocker;
@@ -2486,7 +2476,7 @@
 	found = rocker_flow_tbl_find(rocker, match);
 
 	if (found) {
-		if (trans != SWITCHDEV_TRANS_PREPARE)
+		if (!switchdev_trans_ph_prepare(trans))
 			hash_del(&found->entry);
 		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
 	}
@@ -2506,7 +2496,7 @@
 }
 
 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
-			      enum switchdev_trans trans, int flags,
+			      struct switchdev_trans *trans, int flags,
 			      struct rocker_flow_tbl_entry *entry)
 {
 	if (flags & ROCKER_OP_FLAG_REMOVE)
@@ -2516,7 +2506,7 @@
 }
 
 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
-				   enum switchdev_trans trans, int flags,
+				   struct switchdev_trans *trans, int flags,
 				   u32 in_pport, u32 in_pport_mask,
 				   enum rocker_of_dpa_table_id goto_tbl)
 {
@@ -2536,7 +2526,7 @@
 }
 
 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
-				enum switchdev_trans trans, int flags,
+				struct switchdev_trans *trans, int flags,
 				u32 in_pport, __be16 vlan_id,
 				__be16 vlan_id_mask,
 				enum rocker_of_dpa_table_id goto_tbl,
@@ -2562,7 +2552,7 @@
 }
 
 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
-				    enum switchdev_trans trans,
+				    struct switchdev_trans *trans,
 				    u32 in_pport, u32 in_pport_mask,
 				    __be16 eth_type, const u8 *eth_dst,
 				    const u8 *eth_dst_mask, __be16 vlan_id,
@@ -2599,7 +2589,7 @@
 }
 
 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
-				  enum switchdev_trans trans, int flags,
+				  struct switchdev_trans *trans, int flags,
 				  const u8 *eth_dst, const u8 *eth_dst_mask,
 				  __be16 vlan_id, u32 tunnel_id,
 				  enum rocker_of_dpa_table_id goto_tbl,
@@ -2653,7 +2643,7 @@
 }
 
 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
-					  enum switchdev_trans trans,
+					  struct switchdev_trans *trans,
 					  __be16 eth_type, __be32 dst,
 					  __be32 dst_mask, u32 priority,
 					  enum rocker_of_dpa_table_id goto_tbl,
@@ -2679,7 +2669,7 @@
 }
 
 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
-			       enum switchdev_trans trans, int flags,
+			       struct switchdev_trans *trans, int flags,
 			       u32 in_pport, u32 in_pport_mask,
 			       const u8 *eth_src, const u8 *eth_src_mask,
 			       const u8 *eth_dst, const u8 *eth_dst_mask,
@@ -2744,7 +2734,7 @@
 	return NULL;
 }
 
-static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
+static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
 					struct rocker_group_tbl_entry *entry)
 {
 	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
@@ -2759,7 +2749,7 @@
 }
 
 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
-				enum switchdev_trans trans, int flags,
+				struct switchdev_trans *trans, int flags,
 				struct rocker_group_tbl_entry *match)
 {
 	struct rocker *rocker = rocker_port->rocker;
@@ -2771,7 +2761,7 @@
 	found = rocker_group_tbl_find(rocker, match);
 
 	if (found) {
-		if (trans != SWITCHDEV_TRANS_PREPARE)
+		if (!switchdev_trans_ph_prepare(trans))
 			hash_del(&found->entry);
 		rocker_group_tbl_entry_free(trans, found);
 		found = match;
@@ -2781,7 +2771,7 @@
 		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
 	}
 
-	if (trans != SWITCHDEV_TRANS_PREPARE)
+	if (!switchdev_trans_ph_prepare(trans))
 		hash_add(rocker->group_tbl, &found->entry, found->group_id);
 
 	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
@@ -2791,7 +2781,7 @@
 }
 
 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
-				enum switchdev_trans trans, int flags,
+				struct switchdev_trans *trans, int flags,
 				struct rocker_group_tbl_entry *match)
 {
 	struct rocker *rocker = rocker_port->rocker;
@@ -2804,7 +2794,7 @@
 	found = rocker_group_tbl_find(rocker, match);
 
 	if (found) {
-		if (trans != SWITCHDEV_TRANS_PREPARE)
+		if (!switchdev_trans_ph_prepare(trans))
 			hash_del(&found->entry);
 		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
 	}
@@ -2824,7 +2814,7 @@
 }
 
 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
-			       enum switchdev_trans trans, int flags,
+			       struct switchdev_trans *trans, int flags,
 			       struct rocker_group_tbl_entry *entry)
 {
 	if (flags & ROCKER_OP_FLAG_REMOVE)
@@ -2834,7 +2824,7 @@
 }
 
 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
-				     enum switchdev_trans trans, int flags,
+				     struct switchdev_trans *trans, int flags,
 				     __be16 vlan_id, u32 out_pport,
 				     int pop_vlan)
 {
@@ -2851,7 +2841,7 @@
 }
 
 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
-				   enum switchdev_trans trans,
+				   struct switchdev_trans *trans,
 				   int flags, u8 group_count,
 				   const u32 *group_ids, u32 group_id)
 {
@@ -2876,7 +2866,7 @@
 }
 
 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
-				 enum switchdev_trans trans, int flags,
+				 struct switchdev_trans *trans, int flags,
 				 __be16 vlan_id, u8 group_count,
 				 const u32 *group_ids, u32 group_id)
 {
@@ -2886,7 +2876,7 @@
 }
 
 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
-				   enum switchdev_trans trans, int flags,
+				   struct switchdev_trans *trans, int flags,
 				   u32 index, const u8 *src_mac, const u8 *dst_mac,
 				   __be16 vlan_id, bool ttl_check, u32 pport)
 {
@@ -2922,22 +2912,22 @@
 }
 
 static void _rocker_neigh_add(struct rocker *rocker,
-			      enum switchdev_trans trans,
+			      struct switchdev_trans *trans,
 			      struct rocker_neigh_tbl_entry *entry)
 {
-	if (trans != SWITCHDEV_TRANS_COMMIT)
+	if (!switchdev_trans_ph_commit(trans))
 		entry->index = rocker->neigh_tbl_next_index++;
-	if (trans == SWITCHDEV_TRANS_PREPARE)
+	if (switchdev_trans_ph_prepare(trans))
 		return;
 	entry->ref_count++;
 	hash_add(rocker->neigh_tbl, &entry->entry,
 		 be32_to_cpu(entry->ip_addr));
 }
 
-static void _rocker_neigh_del(enum switchdev_trans trans,
+static void _rocker_neigh_del(struct switchdev_trans *trans,
 			      struct rocker_neigh_tbl_entry *entry)
 {
-	if (trans == SWITCHDEV_TRANS_PREPARE)
+	if (switchdev_trans_ph_prepare(trans))
 		return;
 	if (--entry->ref_count == 0) {
 		hash_del(&entry->entry);
@@ -2946,19 +2936,19 @@
 }
 
 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
-				 enum switchdev_trans trans,
+				 struct switchdev_trans *trans,
 				 const u8 *eth_dst, bool ttl_check)
 {
 	if (eth_dst) {
 		ether_addr_copy(entry->eth_dst, eth_dst);
 		entry->ttl_check = ttl_check;
-	} else if (trans != SWITCHDEV_TRANS_PREPARE) {
+	} else if (!switchdev_trans_ph_prepare(trans)) {
 		entry->ref_count++;
 	}
 }
 
 static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
-				  enum switchdev_trans trans,
+				  struct switchdev_trans *trans,
 				  int flags, __be32 ip_addr, const u8 *eth_dst)
 {
 	struct rocker *rocker = rocker_port->rocker;
@@ -3050,7 +3040,8 @@
 }
 
 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
-				    enum switchdev_trans trans, __be32 ip_addr)
+				    struct switchdev_trans *trans,
+				    __be32 ip_addr)
 {
 	struct net_device *dev = rocker_port->dev;
 	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
@@ -3078,7 +3069,7 @@
 }
 
 static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
-			       enum switchdev_trans trans, int flags,
+			       struct switchdev_trans *trans, int flags,
 			       __be32 ip_addr, u32 *index)
 {
 	struct rocker *rocker = rocker_port->rocker;
@@ -3137,7 +3128,7 @@
 }
 
 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
-					enum switchdev_trans trans,
+					struct switchdev_trans *trans,
 					int flags, __be16 vlan_id)
 {
 	struct rocker_port *p;
@@ -3186,7 +3177,7 @@
 }
 
 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
-				      enum switchdev_trans trans, int flags,
+				      struct switchdev_trans *trans, int flags,
 				      __be16 vlan_id, bool pop_vlan)
 {
 	const struct rocker *rocker = rocker_port->rocker;
@@ -3292,7 +3283,7 @@
 };
 
 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
-				     enum switchdev_trans trans, int flags,
+				     struct switchdev_trans *trans, int flags,
 				     const struct rocker_ctrl *ctrl, __be16 vlan_id)
 {
 	u32 in_pport = rocker_port->pport;
@@ -3325,7 +3316,8 @@
 }
 
 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
-					enum switchdev_trans trans, int flags,
+					struct switchdev_trans *trans,
+					int flags,
 					const struct rocker_ctrl *ctrl,
 					__be16 vlan_id)
 {
@@ -3350,7 +3342,7 @@
 }
 
 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
-				      enum switchdev_trans trans, int flags,
+				      struct switchdev_trans *trans, int flags,
 				      const struct rocker_ctrl *ctrl, __be16 vlan_id)
 {
 	u32 in_pport_mask = 0xffffffff;
@@ -3374,7 +3366,7 @@
 }
 
 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
-				 enum switchdev_trans trans, int flags,
+				 struct switchdev_trans *trans, int flags,
 				 const struct rocker_ctrl *ctrl, __be16 vlan_id)
 {
 	if (ctrl->acl)
@@ -3392,7 +3384,7 @@
 }
 
 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
-				     enum switchdev_trans trans, int flags,
+				     struct switchdev_trans *trans, int flags,
 				     __be16 vlan_id)
 {
 	int err = 0;
@@ -3411,7 +3403,7 @@
 }
 
 static int rocker_port_ctrl(struct rocker_port *rocker_port,
-			    enum switchdev_trans trans, int flags,
+			    struct switchdev_trans *trans, int flags,
 			    const struct rocker_ctrl *ctrl)
 {
 	u16 vid;
@@ -3430,7 +3422,7 @@
 }
 
 static int rocker_port_vlan(struct rocker_port *rocker_port,
-			    enum switchdev_trans trans, int flags, u16 vid)
+			    struct switchdev_trans *trans, int flags, u16 vid)
 {
 	enum rocker_of_dpa_table_id goto_tbl =
 		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
@@ -3487,14 +3479,14 @@
 			   "Error (%d) port VLAN table\n", err);
 
 err_out:
-	if (trans == SWITCHDEV_TRANS_PREPARE)
+	if (switchdev_trans_ph_prepare(trans))
 		change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
 
 	return err;
 }
 
 static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
-			      enum switchdev_trans trans, int flags)
+			      struct switchdev_trans *trans, int flags)
 {
 	enum rocker_of_dpa_table_id goto_tbl;
 	u32 in_pport;
@@ -3522,7 +3514,7 @@
 struct rocker_fdb_learn_work {
 	struct work_struct work;
 	struct rocker_port *rocker_port;
-	enum switchdev_trans trans;
+	struct switchdev_trans *trans;
 	int flags;
 	u8 addr[ETH_ALEN];
 	u16 vid;
@@ -3550,7 +3542,7 @@
 }
 
 static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
-				 enum switchdev_trans trans, int flags,
+				 struct switchdev_trans *trans, int flags,
 				 const u8 *addr, __be16 vlan_id)
 {
 	struct rocker_fdb_learn_work *lw;
@@ -3592,7 +3584,7 @@
 	ether_addr_copy(lw->addr, addr);
 	lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
 
-	if (trans == SWITCHDEV_TRANS_PREPARE)
+	if (switchdev_trans_ph_prepare(trans))
 		rocker_port_kfree(trans, lw);
 	else
 		schedule_work(&lw->work);
@@ -3614,7 +3606,7 @@
 }
 
 static int rocker_port_fdb(struct rocker_port *rocker_port,
-			   enum switchdev_trans trans,
+			   struct switchdev_trans *trans,
 			   const unsigned char *addr,
 			   __be16 vlan_id, int flags)
 {
@@ -3629,7 +3621,8 @@
 		return -ENOMEM;
 
 	fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
-	fdb->key.pport = rocker_port->pport;
+	fdb->touched = jiffies;
+	fdb->key.rocker_port = rocker_port;
 	ether_addr_copy(fdb->key.addr, addr);
 	fdb->key.vlan_id = vlan_id;
 	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
@@ -3638,13 +3631,17 @@
 
 	found = rocker_fdb_tbl_find(rocker, fdb);
 
-	if (removing && found) {
-		rocker_port_kfree(trans, fdb);
-		if (trans != SWITCHDEV_TRANS_PREPARE)
-			hash_del(&found->entry);
-	} else if (!removing && !found) {
-		if (trans != SWITCHDEV_TRANS_PREPARE)
-			hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
+	if (found) {
+		found->touched = jiffies;
+		if (removing) {
+			rocker_port_kfree(trans, fdb);
+			if (!switchdev_trans_ph_prepare(trans))
+				hash_del(&found->entry);
+		}
+	} else if (!removing) {
+		if (!switchdev_trans_ph_prepare(trans))
+			hash_add(rocker->fdb_tbl, &fdb->entry,
+				 fdb->key_crc32);
 	}
 
 	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
@@ -3662,7 +3659,7 @@
 }
 
 static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
-				 enum switchdev_trans trans, int flags)
+				 struct switchdev_trans *trans, int flags)
 {
 	struct rocker *rocker = rocker_port->rocker;
 	struct rocker_fdb_tbl_entry *found;
@@ -3680,7 +3677,7 @@
 	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
 
 	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
-		if (found->key.pport != rocker_port->pport)
+		if (found->key.rocker_port != rocker_port)
 			continue;
 		if (!found->learned)
 			continue;
@@ -3689,7 +3686,7 @@
 					    found->key.vlan_id);
 		if (err)
 			goto err_out;
-		if (trans != SWITCHDEV_TRANS_PREPARE)
+		if (!switchdev_trans_ph_prepare(trans))
 			hash_del(&found->entry);
 	}
 
@@ -3699,8 +3696,43 @@
 	return err;
 }
 
+static void rocker_fdb_cleanup(unsigned long data)
+{
+	struct rocker *rocker = (struct rocker *)data;
+	struct rocker_port *rocker_port;
+	struct rocker_fdb_tbl_entry *entry;
+	struct hlist_node *tmp;
+	unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
+	unsigned long expires;
+	unsigned long lock_flags;
+	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
+		    ROCKER_OP_FLAG_LEARNED;
+	int bkt;
+
+	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+
+	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
+		if (!entry->learned)
+			continue;
+		rocker_port = entry->key.rocker_port;
+		expires = entry->touched + rocker_port->ageing_time;
+		if (time_before_eq(expires, jiffies)) {
+			rocker_port_fdb_learn(rocker_port, NULL,
+					      flags, entry->key.addr,
+					      entry->key.vlan_id);
+			hash_del(&entry->entry);
+		} else if (time_before(expires, next_timer)) {
+			next_timer = expires;
+		}
+	}
+
+	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+
+	mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
+}
+
 static int rocker_port_router_mac(struct rocker_port *rocker_port,
-				  enum switchdev_trans trans, int flags,
+				  struct switchdev_trans *trans, int flags,
 				  __be16 vlan_id)
 {
 	u32 in_pport_mask = 0xffffffff;
@@ -3733,7 +3765,7 @@
 }
 
 static int rocker_port_fwding(struct rocker_port *rocker_port,
-			      enum switchdev_trans trans, int flags)
+			      struct switchdev_trans *trans, int flags)
 {
 	bool pop_vlan;
 	u32 out_pport;
@@ -3772,16 +3804,16 @@
 }
 
 static int rocker_port_stp_update(struct rocker_port *rocker_port,
-				  enum switchdev_trans trans, int flags,
+				  struct switchdev_trans *trans, int flags,
 				  u8 state)
 {
 	bool want[ROCKER_CTRL_MAX] = { 0, };
 	bool prev_ctrls[ROCKER_CTRL_MAX];
-	u8 prev_state;
+	u8 uninitialized_var(prev_state);
 	int err;
 	int i;
 
-	if (trans == SWITCHDEV_TRANS_PREPARE) {
+	if (switchdev_trans_ph_prepare(trans)) {
 		memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
 		prev_state = rocker_port->stp_state;
 	}
@@ -3833,7 +3865,7 @@
 	err = rocker_port_fwding(rocker_port, trans, flags);
 
 err_out:
-	if (trans == SWITCHDEV_TRANS_PREPARE) {
+	if (switchdev_trans_ph_prepare(trans)) {
 		memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
 		rocker_port->stp_state = prev_state;
 	}
@@ -3842,7 +3874,7 @@
 }
 
 static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
-				  enum switchdev_trans trans, int flags)
+				  struct switchdev_trans *trans, int flags)
 {
 	if (rocker_port_is_bridged(rocker_port))
 		/* bridge STP will enable port */
@@ -3854,7 +3886,7 @@
 }
 
 static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
-				   enum switchdev_trans trans, int flags)
+				   struct switchdev_trans *trans, int flags)
 {
 	if (rocker_port_is_bridged(rocker_port))
 		/* bridge STP will disable port */
@@ -3952,7 +3984,7 @@
 }
 
 static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
-				enum switchdev_trans trans, __be32 dst,
+				struct switchdev_trans *trans, __be32 dst,
 				int dst_len, const struct fib_info *fi,
 				u32 tb_id, int flags)
 {
@@ -4026,7 +4058,7 @@
 		goto err_request_rx_irq;
 	}
 
-	err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+	err = rocker_port_fwd_enable(rocker_port, NULL, 0);
 	if (err)
 		goto err_fwd_enable;
 
@@ -4054,7 +4086,7 @@
 	rocker_port_set_enable(rocker_port, false);
 	napi_disable(&rocker_port->napi_rx);
 	napi_disable(&rocker_port->napi_tx);
-	rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE,
+	rocker_port_fwd_disable(rocker_port, NULL,
 				ROCKER_OP_FLAG_NOWAIT);
 	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
 	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
@@ -4240,7 +4272,7 @@
 	struct port_name name = { .buf = buf, .len = len };
 	int err;
 
-	err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+	err = rocker_cmd_exec(rocker_port, NULL, 0,
 			      rocker_cmd_get_port_settings_prep, NULL,
 			      rocker_cmd_get_port_settings_phys_name_proc,
 			      &name);
@@ -4265,7 +4297,7 @@
 	int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
 	__be32 ip_addr = *(__be32 *)n->primary_key;
 
-	rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
+	rocker_port_ipv4_neigh(rocker_port, NULL,
 			       flags, ip_addr, n->ha);
 }
 
@@ -4297,11 +4329,11 @@
 	const struct rocker *rocker = rocker_port->rocker;
 
 	switch (attr->id) {
-	case SWITCHDEV_ATTR_PORT_PARENT_ID:
+	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
 		attr->u.ppid.id_len = sizeof(rocker->hw.id);
 		memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
 		break;
-	case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
 		attr->u.brport_flags = rocker_port->brport_flags;
 		break;
 	default:
@@ -4311,18 +4343,8 @@
 	return 0;
 }
 
-static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
-{
-	struct list_head *mem, *tmp;
-
-	list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
-		list_del(mem);
-		kfree(mem);
-	}
-}
-
 static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
-					enum switchdev_trans trans,
+					struct switchdev_trans *trans,
 					unsigned long brport_flags)
 {
 	unsigned long orig_flags;
@@ -4333,39 +4355,45 @@
 	if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
 		err = rocker_port_set_learning(rocker_port, trans);
 
-	if (trans == SWITCHDEV_TRANS_PREPARE)
+	if (switchdev_trans_ph_prepare(trans))
 		rocker_port->brport_flags = orig_flags;
 
 	return err;
 }
 
+static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
+					  struct switchdev_trans *trans,
+					  u32 ageing_time)
+{
+	if (!switchdev_trans_ph_prepare(trans)) {
+		rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
+		mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
+	}
+
+	return 0;
+}
+
 static int rocker_port_attr_set(struct net_device *dev,
-				struct switchdev_attr *attr)
+				struct switchdev_attr *attr,
+				struct switchdev_trans *trans)
 {
 	struct rocker_port *rocker_port = netdev_priv(dev);
 	int err = 0;
 
-	switch (attr->trans) {
-	case SWITCHDEV_TRANS_PREPARE:
-		BUG_ON(!list_empty(&rocker_port->trans_mem));
-		break;
-	case SWITCHDEV_TRANS_ABORT:
-		rocker_port_trans_abort(rocker_port);
-		return 0;
-	default:
-		break;
-	}
-
 	switch (attr->id) {
-	case SWITCHDEV_ATTR_PORT_STP_STATE:
-		err = rocker_port_stp_update(rocker_port, attr->trans,
+	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+		err = rocker_port_stp_update(rocker_port, trans,
 					     ROCKER_OP_FLAG_NOWAIT,
 					     attr->u.stp_state);
 		break;
-	case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
-		err = rocker_port_brport_flags_set(rocker_port, attr->trans,
+	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+		err = rocker_port_brport_flags_set(rocker_port, trans,
 						   attr->u.brport_flags);
 		break;
+	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+		err = rocker_port_bridge_ageing_time(rocker_port, trans,
+						     attr->u.ageing_time);
+		break;
 	default:
 		err = -EOPNOTSUPP;
 		break;
@@ -4375,7 +4403,8 @@
 }
 
 static int rocker_port_vlan_add(struct rocker_port *rocker_port,
-				enum switchdev_trans trans, u16 vid, u16 flags)
+				struct switchdev_trans *trans,
+				u16 vid, u16 flags)
 {
 	int err;
 
@@ -4394,8 +4423,8 @@
 }
 
 static int rocker_port_vlans_add(struct rocker_port *rocker_port,
-				 enum switchdev_trans trans,
-				 const struct switchdev_obj_vlan *vlan)
+				 struct switchdev_trans *trans,
+				 const struct switchdev_obj_port_vlan *vlan)
 {
 	u16 vid;
 	int err;
@@ -4411,8 +4440,8 @@
 }
 
 static int rocker_port_fdb_add(struct rocker_port *rocker_port,
-			       enum switchdev_trans trans,
-			       const struct switchdev_obj_fdb *fdb)
+			       struct switchdev_trans *trans,
+			       const struct switchdev_obj_port_fdb *fdb)
 {
 	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
 	int flags = 0;
@@ -4424,36 +4453,27 @@
 }
 
 static int rocker_port_obj_add(struct net_device *dev,
-			       struct switchdev_obj *obj)
+			       const struct switchdev_obj *obj,
+			       struct switchdev_trans *trans)
 {
 	struct rocker_port *rocker_port = netdev_priv(dev);
 	const struct switchdev_obj_ipv4_fib *fib4;
 	int err = 0;
 
-	switch (obj->trans) {
-	case SWITCHDEV_TRANS_PREPARE:
-		BUG_ON(!list_empty(&rocker_port->trans_mem));
-		break;
-	case SWITCHDEV_TRANS_ABORT:
-		rocker_port_trans_abort(rocker_port);
-		return 0;
-	default:
-		break;
-	}
-
 	switch (obj->id) {
-	case SWITCHDEV_OBJ_PORT_VLAN:
-		err = rocker_port_vlans_add(rocker_port, obj->trans,
-					    &obj->u.vlan);
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = rocker_port_vlans_add(rocker_port, trans,
+					    SWITCHDEV_OBJ_PORT_VLAN(obj));
 		break;
-	case SWITCHDEV_OBJ_IPV4_FIB:
-		fib4 = &obj->u.ipv4_fib;
-		err = rocker_port_fib_ipv4(rocker_port, obj->trans,
+	case SWITCHDEV_OBJ_ID_IPV4_FIB:
+		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
+		err = rocker_port_fib_ipv4(rocker_port, trans,
 					   htonl(fib4->dst), fib4->dst_len,
 					   fib4->fi, fib4->tb_id, 0);
 		break;
-	case SWITCHDEV_OBJ_PORT_FDB:
-		err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = rocker_port_fdb_add(rocker_port, trans,
+					  SWITCHDEV_OBJ_PORT_FDB(obj));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -4468,17 +4488,17 @@
 {
 	int err;
 
-	err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
+	err = rocker_port_router_mac(rocker_port, NULL,
 				     ROCKER_OP_FLAG_REMOVE, htons(vid));
 	if (err)
 		return err;
 
-	return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
+	return rocker_port_vlan(rocker_port, NULL,
 				ROCKER_OP_FLAG_REMOVE, vid);
 }
 
 static int rocker_port_vlans_del(struct rocker_port *rocker_port,
-				 const struct switchdev_obj_vlan *vlan)
+				 const struct switchdev_obj_port_vlan *vlan)
 {
 	u16 vid;
 	int err;
@@ -4493,8 +4513,8 @@
 }
 
 static int rocker_port_fdb_del(struct rocker_port *rocker_port,
-			       enum switchdev_trans trans,
-			       const struct switchdev_obj_fdb *fdb)
+			       struct switchdev_trans *trans,
+			       const struct switchdev_obj_port_fdb *fdb)
 {
 	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
 	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
@@ -4506,25 +4526,27 @@
 }
 
 static int rocker_port_obj_del(struct net_device *dev,
-			       struct switchdev_obj *obj)
+			       const struct switchdev_obj *obj)
 {
 	struct rocker_port *rocker_port = netdev_priv(dev);
 	const struct switchdev_obj_ipv4_fib *fib4;
 	int err = 0;
 
 	switch (obj->id) {
-	case SWITCHDEV_OBJ_PORT_VLAN:
-		err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = rocker_port_vlans_del(rocker_port,
+					    SWITCHDEV_OBJ_PORT_VLAN(obj));
 		break;
-	case SWITCHDEV_OBJ_IPV4_FIB:
-		fib4 = &obj->u.ipv4_fib;
-		err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
+	case SWITCHDEV_OBJ_ID_IPV4_FIB:
+		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
+		err = rocker_port_fib_ipv4(rocker_port, NULL,
 					   htonl(fib4->dst), fib4->dst_len,
 					   fib4->fi, fib4->tb_id,
 					   ROCKER_OP_FLAG_REMOVE);
 		break;
-	case SWITCHDEV_OBJ_PORT_FDB:
-		err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = rocker_port_fdb_del(rocker_port, NULL,
+					  SWITCHDEV_OBJ_PORT_FDB(obj));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -4535,10 +4557,10 @@
 }
 
 static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
-				struct switchdev_obj *obj)
+				struct switchdev_obj_port_fdb *fdb,
+				switchdev_obj_dump_cb_t *cb)
 {
 	struct rocker *rocker = rocker_port->rocker;
-	struct switchdev_obj_fdb *fdb = &obj->u.fdb;
 	struct rocker_fdb_tbl_entry *found;
 	struct hlist_node *tmp;
 	unsigned long lock_flags;
@@ -4547,13 +4569,13 @@
 
 	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
 	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
-		if (found->key.pport != rocker_port->pport)
+		if (found->key.rocker_port != rocker_port)
 			continue;
 		fdb->addr = found->key.addr;
 		fdb->ndm_state = NUD_REACHABLE;
 		fdb->vid = rocker_port_vlan_to_vid(rocker_port,
 						   found->key.vlan_id);
-		err = obj->cb(rocker_port->dev, obj);
+		err = cb(&fdb->obj);
 		if (err)
 			break;
 	}
@@ -4563,9 +4585,9 @@
 }
 
 static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
-				 struct switchdev_obj *obj)
+				 struct switchdev_obj_port_vlan *vlan,
+				 switchdev_obj_dump_cb_t *cb)
 {
-	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
 	u16 vid;
 	int err = 0;
 
@@ -4576,7 +4598,7 @@
 		if (rocker_vlan_id_is_internal(htons(vid)))
 			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
 		vlan->vid_begin = vlan->vid_end = vid;
-		err = obj->cb(rocker_port->dev, obj);
+		err = cb(&vlan->obj);
 		if (err)
 			break;
 	}
@@ -4585,17 +4607,20 @@
 }
 
 static int rocker_port_obj_dump(struct net_device *dev,
-				struct switchdev_obj *obj)
+				struct switchdev_obj *obj,
+				switchdev_obj_dump_cb_t *cb)
 {
 	const struct rocker_port *rocker_port = netdev_priv(dev);
 	int err = 0;
 
 	switch (obj->id) {
-	case SWITCHDEV_OBJ_PORT_FDB:
-		err = rocker_port_fdb_dump(rocker_port, obj);
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = rocker_port_fdb_dump(rocker_port,
+					   SWITCHDEV_OBJ_PORT_FDB(obj), cb);
 		break;
-	case SWITCHDEV_OBJ_PORT_VLAN:
-		err = rocker_port_vlan_dump(rocker_port, obj);
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = rocker_port_vlan_dump(rocker_port,
+					    SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -4738,7 +4763,7 @@
 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
 					     void *priv)
 {
-	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+	return rocker_cmd_exec(rocker_port, NULL, 0,
 			       rocker_cmd_get_port_stats_prep, NULL,
 			       rocker_cmd_get_port_stats_ethtool_proc,
 			       priv);
@@ -4930,8 +4955,7 @@
 		rocker_port = rocker->ports[i];
 		if (!rocker_port)
 			continue;
-		rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
-				   ROCKER_OP_FLAG_REMOVE);
+		rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
 		unregister_netdev(rocker_port->dev);
 		free_netdev(rocker_port->dev);
 	}
@@ -4969,7 +4993,7 @@
 	rocker_port->port_number = port_number;
 	rocker_port->pport = port_number + 1;
 	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
-	INIT_LIST_HEAD(&rocker_port->trans_mem);
+	rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
 
 	rocker_port_dev_addr_init(rocker_port);
 	dev->netdev_ops = &rocker_port_netdev_ops;
@@ -4992,9 +5016,9 @@
 
 	switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
 
-	rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
+	rocker_port_set_learning(rocker_port, NULL);
 
-	err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+	err = rocker_port_ig_tbl(rocker_port, NULL, 0);
 	if (err) {
 		netdev_err(rocker_port->dev, "install ig port table failed\n");
 		goto err_port_ig_tbl;
@@ -5003,8 +5027,7 @@
 	rocker_port->internal_vlan_id =
 		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
 
-	err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
-				   untagged_vid, 0);
+	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
 	if (err) {
 		netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
 		goto err_untagged_vlan;
@@ -5013,8 +5036,7 @@
 	return 0;
 
 err_untagged_vlan:
-	rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
-			   ROCKER_OP_FLAG_REMOVE);
+	rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
 err_port_ig_tbl:
 	rocker->ports[port_number] = NULL;
 	unregister_netdev(dev);
@@ -5183,6 +5205,10 @@
 		goto err_init_tbls;
 	}
 
+	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
+		    (unsigned long) rocker);
+	mod_timer(&rocker->fdb_cleanup_timer, jiffies);
+
 	err = rocker_probe_ports(rocker);
 	if (err) {
 		dev_err(&pdev->dev, "failed to probe ports\n");
@@ -5195,6 +5221,7 @@
 	return 0;
 
 err_probe_ports:
+	del_timer_sync(&rocker->fdb_cleanup_timer);
 	rocker_free_tbls(rocker);
 err_init_tbls:
 	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
@@ -5222,6 +5249,7 @@
 {
 	struct rocker *rocker = pci_get_drvdata(pdev);
 
+	del_timer_sync(&rocker->fdb_cleanup_timer);
 	rocker_free_tbls(rocker);
 	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
 	rocker_remove_ports(rocker);
@@ -5275,8 +5303,7 @@
 	rocker_port->bridge_dev = bridge;
 	switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
 
-	return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
-				    untagged_vid, 0);
+	return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
 }
 
 static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
@@ -5298,14 +5325,12 @@
 				    false);
 	rocker_port->bridge_dev = NULL;
 
-	err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
-				   untagged_vid, 0);
+	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
 	if (err)
 		return err;
 
 	if (rocker_port->dev->flags & IFF_UP)
-		err = rocker_port_fwd_enable(rocker_port,
-					     SWITCHDEV_TRANS_NONE, 0);
+		err = rocker_port_fwd_enable(rocker_port, NULL, 0);
 
 	return err;
 }
@@ -5318,10 +5343,10 @@
 
 	rocker_port->bridge_dev = master;
 
-	err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+	err = rocker_port_fwd_disable(rocker_port, NULL, 0);
 	if (err)
 		return err;
-	err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+	err = rocker_port_fwd_enable(rocker_port, NULL, 0);
 
 	return err;
 }
@@ -5399,8 +5424,7 @@
 		    ROCKER_OP_FLAG_NOWAIT;
 	__be32 ip_addr = *(__be32 *)n->primary_key;
 
-	return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
-				      flags, ip_addr, n->ha);
+	return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
 }
 
 static int rocker_netevent_event(struct notifier_block *unused,
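
The bulk of the rocker changes convert the driver from the old enum switchdev_trans flags to pointer-based struct switchdev_trans transactions and add an FDB ageing timer. The prepare/commit memory trick described in the allocator comments can be illustrated with a small stand-alone C toy model (not kernel code; every name below is invented for the illustration): the prepare phase allocates and queues items, the commit phase dequeues the very same memory instead of allocating again, so commit can never fail on allocation. Abort handling (running queued destructors) is omitted here.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct trans_item { struct trans_item *next; };
struct trans { struct trans_item *head, *tail; bool ph_prepare; };

/* prepare: allocate and queue; commit: dequeue what prepare queued */
static void *trans_alloc(struct trans *t, size_t size)
{
	struct trans_item *item;

	if (t->ph_prepare) {
		item = calloc(1, sizeof(*item) + size);
		if (!item)
			return NULL;
		if (t->tail)
			t->tail->next = item;
		else
			t->head = item;
		t->tail = item;
	} else {
		item = t->head;
		t->head = item->next;
	}
	return item + 1;
}

int main(void)
{
	struct trans t = { .ph_prepare = true };
	void *prep = trans_alloc(&t, 32);	/* prepare phase: allocates */
	void *commit;

	t.ph_prepare = false;
	commit = trans_alloc(&t, 32);		/* commit phase: reuses */
	printf("same memory reused: %s\n", prep == commit ? "yes" : "no");

	free((struct trans_item *)prep - 1);
	return 0;
}
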
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index ff649eb..78b7b7b 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1604,6 +1604,22 @@
 	memcpy(outbuf, pdu + offset, outlen);
 }
 
+static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	/* All our allocations have been reset */
+	efx_ef10_reset_mc_allocations(efx);
+
+	/* The datapath firmware might have been changed */
+	nic_data->must_check_datapath_caps = true;
+
+	/* MAC statistics have been cleared on the NIC; clear the local
+	 * statistic that we update with efx_update_diff_stat().
+	 */
+	nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
+}
+
 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -1623,17 +1639,7 @@
 		return 0;
 
 	nic_data->warm_boot_count = rc;
-
-	/* All our allocations have been reset */
-	efx_ef10_reset_mc_allocations(efx);
-
-	/* The datapath firmware might have been changed */
-	nic_data->must_check_datapath_caps = true;
-
-	/* MAC statistics have been cleared on the NIC; clear the local
-	 * statistic that we update with efx_update_diff_stat().
-	 */
-	nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
+	efx_ef10_mcdi_reboot_detected(efx);
 
 	return -EIO;
 }
@@ -4670,6 +4676,7 @@
 	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
 	.mcdi_read_response = efx_ef10_mcdi_read_response,
 	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
 	.irq_enable_master = efx_port_dummy_op_void,
 	.irq_test_generate = efx_ef10_irq_test_generate,
 	.irq_disable_non_ev = efx_port_dummy_op_void,
@@ -4774,6 +4781,7 @@
 	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
 	.mcdi_read_response = efx_ef10_mcdi_read_response,
 	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
 	.irq_enable_master = efx_port_dummy_op_void,
 	.irq_test_generate = efx_ef10_irq_test_generate,
 	.irq_disable_non_ev = efx_port_dummy_op_void,
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 98d172b..d3f307e 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1028,10 +1028,21 @@
 
 		/* Consume the status word since efx_mcdi_rpc_finish() won't */
 		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
-			if (efx_mcdi_poll_reboot(efx))
+			rc = efx_mcdi_poll_reboot(efx);
+			if (rc)
 				break;
 			udelay(MCDI_STATUS_DELAY_US);
 		}
+
+		/* On EF10, a CODE_MC_REBOOT event can be received without the
+		 * reboot detection in efx_mcdi_poll_reboot() being triggered.
+		 * If zero was returned from the final call to
+		 * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
+		 * MC has definitely rebooted so prepare for the reset.
+		 */
+		if (!rc && efx->type->mcdi_reboot_detected)
+			efx->type->mcdi_reboot_detected(efx);
+
 		mcdi->new_epoch = true;
 
 		/* Nobody was waiting for an MCDI request, so trigger a reset */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c530e1c..ad56231 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1277,6 +1277,7 @@
 	void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
 				   size_t pdu_offset, size_t pdu_len);
 	int (*mcdi_poll_reboot)(struct efx_nic *efx);
+	void (*mcdi_reboot_detected)(struct efx_nic *efx);
 	void (*irq_enable_master)(struct efx_nic *efx);
 	void (*irq_test_generate)(struct efx_nic *efx);
 	void (*irq_disable_non_ev)(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 925f2f8..64d8aa4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -424,7 +424,7 @@
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	struct hwtstamp_config config;
-	struct timespec now;
+	struct timespec64 now;
 	u64 temp = 0;
 	u32 ptp_v2 = 0;
 	u32 tstamp_all = 0;
@@ -621,8 +621,10 @@
 					     priv->default_addend);
 
 		/* initialize system time */
-		getnstimeofday(&now);
-		priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
+		ktime_get_real_ts64(&now);
+
+		/* lower 32 bits of tv_sec are safe until y2106 */
+		priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
 					    now.tv_nsec);
 	}
 
@@ -1945,7 +1947,7 @@
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	unsigned int txsize = priv->dma_tx_size;
-	unsigned int entry;
+	int entry;
 	int i, csum_insertion = 0, is_jumbo = 0;
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	struct dma_desc *desc, *first;
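
The stmmac PTP init now takes the system time via ktime_get_real_ts64() and hands only the low 32 bits of tv_sec to the hardware. A quick stand-alone check of the "safe until y2106" comment (plain user-space C, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t wrap = (uint64_t)UINT32_MAX + 1;	/* 2^32 seconds */

	/* 1970 + seconds / (average Gregorian year in seconds) */
	printf("u32 seconds wrap around year %.0f\n",
	       1970.0 + wrap / (365.2425 * 24 * 3600));
	return 0;	/* prints 2106 */
}
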
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index b735fa2..ebf6abc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -161,11 +161,16 @@
 
 		if (!gpio_request(reset_gpio, "mdio-reset")) {
 			gpio_direction_output(reset_gpio, active_low ? 1 : 0);
-			udelay(data->delays[0]);
+			if (data->delays[0])
+				msleep(DIV_ROUND_UP(data->delays[0], 1000));
+
 			gpio_set_value(reset_gpio, active_low ? 0 : 1);
-			udelay(data->delays[1]);
+			if (data->delays[1])
+				msleep(DIV_ROUND_UP(data->delays[1], 1000));
+
 			gpio_set_value(reset_gpio, active_low ? 1 : 0);
-			udelay(data->delays[2]);
+			if (data->delays[2])
+				msleep(DIV_ROUND_UP(data->delays[2], 1000));
 		}
 	}
 #endif
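
The MDIO reset delays come from the device tree in microseconds but are far too long to busy-wait with udelay(), so they are now rounded up to whole milliseconds and passed to msleep(). A small user-space sketch of that rounding, with DIV_ROUND_UP redefined locally for the demo:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int us_delays[] = { 100, 1000, 10500 };	/* example DT values */
	unsigned int i;

	/* rounding up means the reset pulse is never shortened */
	for (i = 0; i < sizeof(us_delays) / sizeof(us_delays[0]); i++)
		printf("%u us -> msleep(%u)\n",
		       us_delays[i], DIV_ROUND_UP(us_delays[i], 1000));
	return 0;	/* 100us->1ms, 1000us->1ms, 10500us->11ms */
}
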
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 53fe200..cc106d8 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1756,7 +1756,8 @@
 #endif
 };
 
-static struct vnet *vnet_new(const u64 *local_mac)
+static struct vnet *vnet_new(const u64 *local_mac,
+			     struct vio_dev *vdev)
 {
 	struct net_device *dev;
 	struct vnet *vp;
@@ -1790,6 +1791,8 @@
 			   NETIF_F_HW_CSUM | NETIF_F_SG;
 	dev->features = dev->hw_features;
 
+	SET_NETDEV_DEV(dev, &vdev->dev);
+
 	err = register_netdev(dev);
 	if (err) {
 		pr_err("Cannot register net device, aborting\n");
@@ -1808,7 +1811,8 @@
 	return ERR_PTR(err);
 }
 
-static struct vnet *vnet_find_or_create(const u64 *local_mac)
+static struct vnet *vnet_find_or_create(const u64 *local_mac,
+					struct vio_dev *vdev)
 {
 	struct vnet *iter, *vp;
 
@@ -1821,7 +1825,7 @@
 		}
 	}
 	if (!vp)
-		vp = vnet_new(local_mac);
+		vp = vnet_new(local_mac, vdev);
 	mutex_unlock(&vnet_list_mutex);
 
 	return vp;
@@ -1848,7 +1852,8 @@
 static const char *local_mac_prop = "local-mac-address";
 
 static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
-						u64 port_node)
+				     u64 port_node,
+				     struct vio_dev *vdev)
 {
 	const u64 *local_mac = NULL;
 	u64 a;
@@ -1869,7 +1874,7 @@
 	if (!local_mac)
 		return ERR_PTR(-ENODEV);
 
-	return vnet_find_or_create(local_mac);
+	return vnet_find_or_create(local_mac, vdev);
 }
 
 static struct ldc_channel_config vnet_ldc_cfg = {
@@ -1923,7 +1928,7 @@
 
 	hp = mdesc_grab();
 
-	vp = vnet_find_parent(hp, vdev->mp);
+	vp = vnet_find_parent(hp, vdev->mp, vdev);
 	if (IS_ERR(vp)) {
 		pr_err("Cannot find port parent vnet\n");
 		err = PTR_ERR(vp);
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index f595094..c08be62 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -19,11 +19,38 @@
 
 #include "cpsw.h"
 
-#define AM33XX_CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
-#define AM33XX_CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
+#define CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
+#define CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
 
-int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
-			     u8 *mac_addr)
+static int davinci_emac_3517_get_macid(struct device *dev, u16 offset,
+				       int slave, u8 *mac_addr)
+{
+	u32 macid_lsb;
+	u32 macid_msb;
+	struct regmap *syscon;
+
+	syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+	if (IS_ERR(syscon)) {
+		if (PTR_ERR(syscon) == -ENODEV)
+			return 0;
+		return PTR_ERR(syscon);
+	}
+
+	regmap_read(syscon, CTRL_MAC_LO_REG(offset, slave), &macid_lsb);
+	regmap_read(syscon, CTRL_MAC_HI_REG(offset, slave), &macid_msb);
+
+	mac_addr[0] = (macid_msb >> 16) & 0xff;
+	mac_addr[1] = (macid_msb >> 8)  & 0xff;
+	mac_addr[2] = macid_msb & 0xff;
+	mac_addr[3] = (macid_lsb >> 16) & 0xff;
+	mac_addr[4] = (macid_lsb >> 8)  & 0xff;
+	mac_addr[5] = macid_lsb & 0xff;
+
+	return 0;
+}
+
+static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
+				    u8 *mac_addr)
 {
 	u32 macid_lo;
 	u32 macid_hi;
@@ -36,10 +63,8 @@
 		return PTR_ERR(syscon);
 	}
 
-	regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(offset, slave),
-		    &macid_lo);
-	regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(offset, slave),
-		    &macid_hi);
+	regmap_read(syscon, CTRL_MAC_LO_REG(offset, slave), &macid_lo);
+	regmap_read(syscon, CTRL_MAC_HI_REG(offset, slave), &macid_hi);
 
 	mac_addr[5] = (macid_lo >> 8) & 0xff;
 	mac_addr[4] = macid_lo & 0xff;
@@ -50,6 +75,27 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cpsw_am33xx_cm_get_macid);
+
+int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
+{
+	if (of_machine_is_compatible("ti,am33xx"))
+		return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+	if (of_device_is_compatible(dev->of_node, "ti,am3517-emac"))
+		return davinci_emac_3517_get_macid(dev, 0x110, slave, mac_addr);
+
+	if (of_device_is_compatible(dev->of_node, "ti,dm816-emac"))
+		return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr);
+
+	if (of_machine_is_compatible("ti,am4372"))
+		return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+	if (of_machine_is_compatible("ti,dra7"))
+		return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr);
+
+	dev_err(dev, "incompatible machine/device type for reading mac address\n");
+	return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(ti_cm_get_macid);
 
 MODULE_LICENSE("GPL");
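
ti_cm_get_macid() now dispatches on the machine/device compatible, and both helpers unpack the MAC address from a pair of 32-bit control-module registers (MSB word first). A stand-alone sketch of that unpacking, using made-up register values purely for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t macid_msb = 0x00aabbcc;	/* example register value */
	uint32_t macid_lsb = 0x00ddeeff;	/* example register value */
	uint8_t mac[6];

	mac[0] = (macid_msb >> 16) & 0xff;
	mac[1] = (macid_msb >> 8)  & 0xff;
	mac[2] = macid_msb & 0xff;
	mac[3] = (macid_lsb >> 16) & 0xff;
	mac[4] = (macid_lsb >> 8)  & 0xff;
	mac[5] = macid_lsb & 0xff;

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;	/* prints aa:bb:cc:dd:ee:ff */
}
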
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 0ea78326..e9cc61e 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -2,6 +2,8 @@
  *
  * Copyright (C) 2013 Texas Instruments
  *
+ * Module Author: Mugunthan V N <mugunthanvnm@ti.com>
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * version 2 as published by the Free Software Foundation.
@@ -13,7 +15,7 @@
  */
 
 #include <linux/platform_device.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <linux/of.h>
@@ -173,7 +175,6 @@
 	},
 	{}
 };
-MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
 
 static int cpsw_phy_sel_probe(struct platform_device *pdev)
 {
@@ -214,7 +215,4 @@
 		.of_match_table = cpsw_phy_sel_id_table,
 	},
 };
-
-module_platform_driver(cpsw_phy_sel_driver);
-MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(cpsw_phy_sel_driver);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c670317..75584cc 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2058,13 +2058,10 @@
 		if (mac_addr) {
 			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
 		} else {
-			if (of_machine_is_compatible("ti,am33xx")) {
-				ret = cpsw_am33xx_cm_get_macid(&pdev->dev,
-							0x630, i,
-							slave_data->mac_addr);
-				if (ret)
-					return ret;
-			}
+			ret = ti_cm_get_macid(&pdev->dev, i,
+					      slave_data->mac_addr);
+			if (ret)
+				return ret;
 		}
 		if (data->dual_emac) {
 			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index ca90efa..442a703 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -41,7 +41,6 @@
 };
 
 void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
-int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
-			     u8 *mac_addr);
+int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr);
 
 #endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index a21c77b..33bd3b9 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1861,8 +1861,12 @@
 	pdata->no_bd_ram = of_property_read_bool(np, "ti,davinci-no-bd-ram");
 
 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
-	if (!priv->phy_node)
-		pdata->phy_id = NULL;
+	if (!priv->phy_node) {
+		if (!of_phy_is_fixed_link(np))
+			pdata->phy_id = NULL;
+		else if (of_phy_register_fixed_link(np) >= 0)
+			priv->phy_node = of_node_get(np);
+	}
 
 	auxdata = pdev->dev.platform_data;
 	if (auxdata) {
@@ -1882,51 +1886,13 @@
 	return  pdata;
 }
 
-static int davinci_emac_3517_get_macid(struct device *dev, u16 offset,
-				       int slave, u8 *mac_addr)
-{
-	u32 macid_lsb;
-	u32 macid_msb;
-	struct regmap *syscon;
-
-	syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
-	if (IS_ERR(syscon)) {
-		if (PTR_ERR(syscon) == -ENODEV)
-			return 0;
-		return PTR_ERR(syscon);
-	}
-
-	regmap_read(syscon, offset, &macid_lsb);
-	regmap_read(syscon, offset + 4, &macid_msb);
-
-	mac_addr[0] = (macid_msb >> 16) & 0xff;
-	mac_addr[1] = (macid_msb >> 8)  & 0xff;
-	mac_addr[2] = macid_msb & 0xff;
-	mac_addr[3] = (macid_lsb >> 16) & 0xff;
-	mac_addr[4] = (macid_lsb >> 8)  & 0xff;
-	mac_addr[5] = macid_lsb & 0xff;
-
-	return 0;
-}
-
 static int davinci_emac_try_get_mac(struct platform_device *pdev,
 				    int instance, u8 *mac_addr)
 {
-	int error = -EINVAL;
-
 	if (!pdev->dev.of_node)
-		return error;
+		return -EINVAL;
 
-	if (of_device_is_compatible(pdev->dev.of_node, "ti,am3517-emac"))
-		error = davinci_emac_3517_get_macid(&pdev->dev, 0x110,
-						    0, mac_addr);
-	else if (of_device_is_compatible(pdev->dev.of_node,
-					 "ti,dm816-emac"))
-		error = cpsw_am33xx_cm_get_macid(&pdev->dev, 0x30,
-						 instance,
-						 mac_addr);
-
-	return error;
+	return ti_cm_get_macid(&pdev->dev, instance, mac_addr);
 }
 
 /**
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1a5aca5..9f9832f 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -291,13 +291,6 @@
 			    interface_list) {
 		struct netcp_intf_modpriv *intf_modpriv;
 
-		/* If interface not registered then register now */
-		if (!netcp_intf->netdev_registered)
-			ret = netcp_register_interface(netcp_intf);
-
-		if (ret)
-			return -ENODEV;
-
 		intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
 					    GFP_KERNEL);
 		if (!intf_modpriv)
@@ -306,6 +299,11 @@
 		interface = of_parse_phandle(netcp_intf->node_interface,
 					     module->name, 0);
 
+		if (!interface) {
+			devm_kfree(dev, intf_modpriv);
+			continue;
+		}
+
 		intf_modpriv->netcp_priv = netcp_intf;
 		intf_modpriv->netcp_module = module;
 		list_add_tail(&intf_modpriv->intf_list,
@@ -323,6 +321,18 @@
 			continue;
 		}
 	}
+
+	/* Now register the interface with netdev */
+	list_for_each_entry(netcp_intf,
+			    &netcp_device->interface_head,
+			    interface_list) {
+		/* If interface not registered then register now */
+		if (!netcp_intf->netdev_registered) {
+			ret = netcp_register_interface(netcp_intf);
+			if (ret)
+				return -ENODEV;
+		}
+	}
 	return 0;
 }
 
@@ -357,7 +367,6 @@
 		if (ret < 0)
 			goto fail;
 	}
-
 	mutex_unlock(&netcp_modules_lock);
 	return 0;
 
@@ -796,7 +805,7 @@
 	netcp->rx_pool = NULL;
 }
 
-static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
+static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 {
 	struct knav_dma_desc *hwdesc;
 	unsigned int buf_len, dma_sz;
@@ -810,7 +819,7 @@
 	hwdesc = knav_pool_desc_get(netcp->rx_pool);
 	if (IS_ERR_OR_NULL(hwdesc)) {
 		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
-		return;
+		return -ENOMEM;
 	}
 
 	if (likely(fdq == 0)) {
@@ -862,25 +871,26 @@
 	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
 			   &dma_sz);
 	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
-	return;
+	return 0;
 
 fail:
 	knav_pool_desc_put(netcp->rx_pool, hwdesc);
+	return -ENOMEM;
 }
 
 /* Refill Rx FDQ with descriptors & attached buffers */
 static void netcp_rxpool_refill(struct netcp_intf *netcp)
 {
 	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
-	int i;
+	int i, ret = 0;
 
 	/* Calculate the FDQ deficit and refill */
 	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
 		fdq_deficit[i] = netcp->rx_queue_depths[i] -
 				 knav_queue_get_count(netcp->rx_fdq[i]);
 
-		while (fdq_deficit[i]--)
-			netcp_allocate_rx_buf(netcp, i);
+		while (fdq_deficit[i]-- && !ret)
+			ret = netcp_allocate_rx_buf(netcp, i);
 	} /* end for fdqs */
 }
 
@@ -893,12 +903,12 @@
 
 	packets = netcp_process_rx_packets(netcp, budget);
 
+	netcp_rxpool_refill(netcp);
 	if (packets < budget) {
 		napi_complete(&netcp->rx_napi);
 		knav_queue_enable_notify(netcp->rx_queue);
 	}
 
-	netcp_rxpool_refill(netcp);
 	return packets;
 }
 
@@ -1384,7 +1394,6 @@
 			continue;
 		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
 			naddr->addr, naddr->type);
-		mutex_lock(&netcp_modules_lock);
 		for_each_module(netcp, priv) {
 			module = priv->netcp_module;
 			if (!module->del_addr)
@@ -1393,7 +1402,6 @@
 						 naddr);
 			WARN_ON(error);
 		}
-		mutex_unlock(&netcp_modules_lock);
 		netcp_addr_del(netcp, naddr);
 	}
 }
@@ -1410,7 +1418,7 @@
 			continue;
 		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
 			naddr->addr, naddr->type);
-		mutex_lock(&netcp_modules_lock);
+
 		for_each_module(netcp, priv) {
 			module = priv->netcp_module;
 			if (!module->add_addr)
@@ -1418,7 +1426,6 @@
 			error = module->add_addr(priv->module_priv, naddr);
 			WARN_ON(error);
 		}
-		mutex_unlock(&netcp_modules_lock);
 	}
 }
 
@@ -1432,6 +1439,7 @@
 		   ndev->flags & IFF_ALLMULTI ||
 		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
 
+	spin_lock(&netcp->lock);
 	/* first clear all marks */
 	netcp_addr_clear_mark(netcp);
 
@@ -1450,6 +1458,7 @@
 	/* finally sweep and callout into modules */
 	netcp_addr_sweep_del(netcp);
 	netcp_addr_sweep_add(netcp);
+	spin_unlock(&netcp->lock);
 }
 
 static void netcp_free_navigator_resources(struct netcp_intf *netcp)
@@ -1614,7 +1623,6 @@
 		goto fail;
 	}
 
-	mutex_lock(&netcp_modules_lock);
 	for_each_module(netcp, intf_modpriv) {
 		module = intf_modpriv->netcp_module;
 		if (module->open) {
@@ -1625,7 +1633,6 @@
 			}
 		}
 	}
-	mutex_unlock(&netcp_modules_lock);
 
 	napi_enable(&netcp->rx_napi);
 	napi_enable(&netcp->tx_napi);
@@ -1642,7 +1649,6 @@
 		if (module->close)
 			module->close(intf_modpriv->module_priv, ndev);
 	}
-	mutex_unlock(&netcp_modules_lock);
 
 fail:
 	netcp_free_navigator_resources(netcp);
@@ -1666,7 +1672,6 @@
 	napi_disable(&netcp->rx_napi);
 	napi_disable(&netcp->tx_napi);
 
-	mutex_lock(&netcp_modules_lock);
 	for_each_module(netcp, intf_modpriv) {
 		module = intf_modpriv->netcp_module;
 		if (module->close) {
@@ -1675,7 +1680,6 @@
 				dev_err(netcp->ndev_dev, "Close failed\n");
 		}
 	}
-	mutex_unlock(&netcp_modules_lock);
 
 	/* Recycle Rx descriptors from completion queue */
 	netcp_empty_rx_queue(netcp);
@@ -1703,7 +1707,6 @@
 	if (!netif_running(ndev))
 		return -EINVAL;
 
-	mutex_lock(&netcp_modules_lock);
 	for_each_module(netcp, intf_modpriv) {
 		module = intf_modpriv->netcp_module;
 		if (!module->ioctl)
@@ -1719,7 +1722,6 @@
 	}
 
 out:
-	mutex_unlock(&netcp_modules_lock);
 	return (ret == 0) ? 0 : err;
 }
 
@@ -1754,11 +1756,12 @@
 	struct netcp_intf *netcp = netdev_priv(ndev);
 	struct netcp_intf_modpriv *intf_modpriv;
 	struct netcp_module *module;
+	unsigned long flags;
 	int err = 0;
 
 	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
 
-	mutex_lock(&netcp_modules_lock);
+	spin_lock_irqsave(&netcp->lock, flags);
 	for_each_module(netcp, intf_modpriv) {
 		module = intf_modpriv->netcp_module;
 		if ((module->add_vid) && (vid != 0)) {
@@ -1770,7 +1773,8 @@
 			}
 		}
 	}
-	mutex_unlock(&netcp_modules_lock);
+	spin_unlock_irqrestore(&netcp->lock, flags);
+
 	return err;
 }
 
@@ -1779,11 +1783,12 @@
 	struct netcp_intf *netcp = netdev_priv(ndev);
 	struct netcp_intf_modpriv *intf_modpriv;
 	struct netcp_module *module;
+	unsigned long flags;
 	int err = 0;
 
 	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
 
-	mutex_lock(&netcp_modules_lock);
+	spin_lock_irqsave(&netcp->lock, flags);
 	for_each_module(netcp, intf_modpriv) {
 		module = intf_modpriv->netcp_module;
 		if (module->del_vid) {
@@ -1795,7 +1800,7 @@
 			}
 		}
 	}
-	mutex_unlock(&netcp_modules_lock);
+	spin_unlock_irqrestore(&netcp->lock, flags);
 	return err;
 }
 
@@ -2040,7 +2045,6 @@
 	struct device_node *child, *interfaces;
 	struct netcp_device *netcp_device;
 	struct device *dev = &pdev->dev;
-	struct netcp_module *module;
 	int ret;
 
 	if (!node) {
@@ -2087,14 +2091,6 @@
 	/* Add the device instance to the list */
 	list_add_tail(&netcp_device->device_list, &netcp_devices);
 
-	/* Probe & attach any modules already registered */
-	mutex_lock(&netcp_modules_lock);
-	for_each_netcp_module(module) {
-		ret = netcp_module_probe(netcp_device, module);
-		if (ret < 0)
-			dev_err(dev, "module(%s) probe failed\n", module->name);
-	}
-	mutex_unlock(&netcp_modules_lock);
 	return 0;
 
 probe_quit_interface:
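
The netcp_core.c hunks above change netcp_allocate_rx_buf() to return an error so the FDQ refill loop can stop at the first failed descriptor allocation instead of spinning through an exhausted pool. A minimal stand-alone sketch of that loop shape, with a made-up allocator and pool size standing in for the driver's descriptor pool:

#include <stdio.h>

#define FDQ_DEPTH	8

static int pool_free = 3;	/* pretend only 3 descriptors remain */

/* stand-in allocator: 0 on success, negative when the pool is empty */
static int alloc_rx_buf(int fdq)
{
	if (pool_free == 0)
		return -1;
	pool_free--;
	printf("fdq %d: refilled one buffer, %d descriptors left\n",
	       fdq, pool_free);
	return 0;
}

int main(void)
{
	int deficit = FDQ_DEPTH;
	int ret = 0;

	/* same shape as the driver's loop: bail out on the first failure */
	while (deficit-- && !ret)
		ret = alloc_rx_buf(0);

	return 0;
}
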
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 6f16d6a..6bff8d8 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -77,6 +77,7 @@
 #define GBENU_ALE_OFFSET		0x1e000
 #define GBENU_HOST_PORT_NUM		0
 #define GBENU_NUM_ALE_ENTRIES		1024
+#define GBENU_SGMII_MODULE_SIZE		0x100
 
 /* 10G Ethernet SS defines */
 #define XGBE_MODULE_NAME		"netcp-xgbe"
@@ -149,8 +150,8 @@
 #define XGBE_STATS2_MODULE			2
 
 /* s: 0-based slave_port */
-#define SGMII_BASE(s) \
-	(((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
+#define SGMII_BASE(d, s) \
+	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
 
 #define GBE_TX_QUEUE				648
 #define	GBE_TXHOOK_ORDER			0
@@ -1997,13 +1998,8 @@
 		return;
 
 	if (!SLAVE_LINK_IS_XGMII(slave)) {
-		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
-			sgmii_link_state =
-				netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
-		else
-			sgmii_link_state =
-				netcp_sgmii_get_port_link(
-						gbe_dev->sgmii_port_regs, sp);
+		sgmii_link_state =
+			netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
 	}
 
 	phy_link_state = gbe_phy_link_status(slave);
@@ -2100,17 +2096,11 @@
 static void gbe_sgmii_rtreset(struct gbe_priv *priv,
 			      struct gbe_slave *slave, bool set)
 {
-	void __iomem *sgmii_port_regs;
-
 	if (SLAVE_LINK_IS_XGMII(slave))
 		return;
 
-	if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
-		sgmii_port_regs = priv->sgmii_port34_regs;
-	else
-		sgmii_port_regs = priv->sgmii_port_regs;
-
-	netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
+	netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
+			    slave->slave_num, set);
 }
 
 static void gbe_slave_stop(struct gbe_intf *intf)
@@ -2136,17 +2126,12 @@
 
 static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
 {
-	void __iomem *sgmii_port_regs;
+	if (SLAVE_LINK_IS_XGMII(slave))
+		return;
 
-	sgmii_port_regs = priv->sgmii_port_regs;
-	if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
-		sgmii_port_regs = priv->sgmii_port34_regs;
-
-	if (!SLAVE_LINK_IS_XGMII(slave)) {
-		netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
-		netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
-				   slave->link_interface);
-	}
+	netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
+	netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
+			   slave->link_interface);
 }
 
 static int gbe_slave_open(struct gbe_intf *gbe_intf)
@@ -2997,6 +2982,14 @@
 	gbe_dev->switch_regs = regs;
 
 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
+
+	/* Although the sgmii modules are memory mapped to one contiguous
+	 * region on GBENU devices, setting sgmii_port34_regs allows
+	 * consistent code when accessing the sgmii api.
+	 */
+	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
+				     (2 * GBENU_SGMII_MODULE_SIZE);
+
 	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
 
 	for (i = 0; i < (gbe_dev->max_num_ports); i++)
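
The netcp_ethss.c rework folds the per-version base selection into the two-argument SGMII_BASE(d, s) macro: slaves 0 and 1 use the first SGMII register block, slaves 2 and 3 the second, and on GBENU the second block is simply the first plus two module strides. A hedged user-space sketch of that addressing rule; the struct, addresses and stride below are illustrative stand-ins rather than the driver's types:

#include <stdint.h>
#include <stdio.h>

#define SGMII_MODULE_SIZE	0x100	/* per-port register stride (illustrative) */

struct gbe_like {
	uintptr_t sgmii_port_regs;	/* slaves 0 and 1 */
	uintptr_t sgmii_port34_regs;	/* slaves 2 and 3 */
};

/* s: 0-based slave port, same selection rule as the macro in the diff */
#define SGMII_BASE(d, s) \
	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)

int main(void)
{
	struct gbe_like gbe = { .sgmii_port_regs = 0x1e000 };
	int s;

	/* on GBENU-style devices the second block follows the first two ports */
	gbe.sgmii_port34_regs = gbe.sgmii_port_regs + 2 * SGMII_MODULE_SIZE;

	for (s = 0; s < 4; s++)
		printf("slave %d -> base 0x%lx\n", s,
		       (unsigned long)SGMII_BASE(&gbe, s));
	return 0;
}
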
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 2f1264b..d3d0947 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -17,7 +17,7 @@
 
 config VIA_RHINE
 	tristate "VIA Rhine support"
-	depends on (PCI || OF_IRQ)
+	depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP)
 	depends on HAS_DMA
 	select CRC32
 	select MII
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index 8cf9d4f..415de1e 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -59,16 +59,15 @@
 int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
 {
 	struct mii_bus *bus;
-	const u32 *bus_hz;
+	u32 bus_hz;
 	int clk_div;
-	int rc, size;
+	int rc;
 	struct resource res;
 
 	/* Calculate a reasonable divisor for the clock rate */
 	clk_div = 0x3f; /* worst-case default setting */
-	bus_hz = of_get_property(np, "clock-frequency", &size);
-	if (bus_hz && size >= sizeof(*bus_hz)) {
-		clk_div = (*bus_hz) / (2500 * 1000 * 2) - 1;
+	if (of_property_read_u32(np, "clock-frequency", &bus_hz) == 0) {
+		clk_div = bus_hz / (2500 * 1000 * 2) - 1;
 		if (clk_div < 1)
 			clk_div = 1;
 		if (clk_div > 0x3f)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 2a5a168..507bbb0 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -129,7 +129,6 @@
 {
 	int ret;
 	u32 clk_div, host_clock;
-	u32 *property_p;
 	struct mii_bus *bus;
 	struct resource res;
 	struct device_node *np1;
@@ -168,8 +167,7 @@
 		clk_div = DEFAULT_CLOCK_DIVISOR;
 		goto issue;
 	}
-	property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL);
-	if (!property_p) {
+	if (of_property_read_u32(np1, "clock-frequency", &host_clock)) {
 		netdev_warn(lp->ndev, "clock-frequency property not found.\n");
 		netdev_warn(lp->ndev,
 			    "Setting MDIO clock divisor to default %d\n",
@@ -179,7 +177,6 @@
 		goto issue;
 	}
 
-	host_clock = be32_to_cpup(property_p);
 	clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
 	/* If there is any remainder from the division of
 	 * fHOST / (MAX_MDIO_FREQ * 2), then we need to add
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 6008eee..cf468c8 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -828,6 +828,8 @@
 		if (!phydev)
 			dev_info(dev,
 				 "MDIO of the phy is not registered yet\n");
+		else
+			put_device(&phydev->dev);
 		return 0;
 	}
 
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index b5f4a78..2d3848c9d 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -1011,11 +1011,11 @@
 					set_bit(epidx, &irq_bit);
 				break;
 			}
+
+			hw->ep_shm_info[epidx].es_status =
+				info[epidx].es_status;
+			hw->ep_shm_info[epidx].zone = info[epidx].zone;
 		}
-
-		hw->ep_shm_info[epidx].es_status = info[epidx].es_status;
-		hw->ep_shm_info[epidx].zone = info[epidx].zone;
-
 		break;
 	}
 
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index da3259c..8f5c02e 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -126,6 +126,8 @@
 	__be32 addr;
 	int err;
 
+	iph = ip_hdr(skb); /* outer IP header... */
+
 	if (gs->collect_md) {
 		static u8 zero_vni[3];
 
@@ -133,7 +135,6 @@
 		addr = 0;
 	} else {
 		vni = gnvh->vni;
-		iph = ip_hdr(skb); /* Still outer IP header... */
 		addr = iph->saddr;
 	}
 
@@ -178,7 +179,6 @@
 
 	skb_reset_network_header(skb);
 
-	iph = ip_hdr(skb); /* Now inner IP header... */
 	err = IP_ECN_decapsulate(iph, skb);
 
 	if (unlikely(err)) {
@@ -626,6 +626,7 @@
 	struct geneve_sock *gs = geneve->sock;
 	struct ip_tunnel_info *info = NULL;
 	struct rtable *rt = NULL;
+	const struct iphdr *iip; /* interior IP header */
 	struct flowi4 fl4;
 	__u8 tos, ttl;
 	__be16 sport;
@@ -653,6 +654,8 @@
 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
 	skb_reset_mac_header(skb);
 
+	iip = ip_hdr(skb);
+
 	if (info) {
 		const struct ip_tunnel_key *key = &info->key;
 		u8 *opts = NULL;
@@ -668,19 +671,16 @@
 		if (unlikely(err))
 			goto err;
 
-		tos = key->tos;
+		tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
 		ttl = key->ttl;
 		df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
 	} else {
-		const struct iphdr *iip; /* interior IP header */
-
 		udp_csum = false;
 		err = geneve_build_skb(rt, skb, 0, geneve->vni,
 				       0, NULL, udp_csum);
 		if (unlikely(err))
 			goto err;
 
-		iip = ip_hdr(skb);
 		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
 		ttl = geneve->ttl;
 		if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
@@ -748,12 +748,8 @@
 	dev->features    |= NETIF_F_RXCSUM;
 	dev->features    |= NETIF_F_GSO_SOFTWARE;
 
-	dev->vlan_features = dev->features;
-	dev->features    |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
-
 	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
 	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
-	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
 
 	netif_keep_dst(dev);
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
@@ -819,7 +815,7 @@
 
 static int geneve_configure(struct net *net, struct net_device *dev,
 			    __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
-			    __u16 dst_port, bool metadata)
+			    __be16 dst_port, bool metadata)
 {
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
 	struct geneve_dev *t, *geneve = netdev_priv(dev);
@@ -844,10 +840,10 @@
 
 	geneve->ttl = ttl;
 	geneve->tos = tos;
-	geneve->dst_port = htons(dst_port);
+	geneve->dst_port = dst_port;
 	geneve->collect_md = metadata;
 
-	t = geneve_find_dev(gn, htons(dst_port), rem_addr, geneve->vni,
+	t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni,
 			    &tun_on_same_port, &tun_collect_md);
 	if (t)
 		return -EBUSY;
@@ -871,7 +867,7 @@
 static int geneve_newlink(struct net *net, struct net_device *dev,
 			  struct nlattr *tb[], struct nlattr *data[])
 {
-	__u16 dst_port = GENEVE_UDP_PORT;
+	__be16 dst_port = htons(GENEVE_UDP_PORT);
 	__u8 ttl = 0, tos = 0;
 	bool metadata = false;
 	__be32 rem_addr;
@@ -890,7 +886,7 @@
 		tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
 
 	if (data[IFLA_GENEVE_PORT])
-		dst_port = nla_get_u16(data[IFLA_GENEVE_PORT]);
+		dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]);
 
 	if (data[IFLA_GENEVE_COLLECT_METADATA])
 		metadata = true;
@@ -913,7 +909,7 @@
 		nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
 		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TTL */
 		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TOS */
-		nla_total_size(sizeof(__u16)) +  /* IFLA_GENEVE_PORT */
+		nla_total_size(sizeof(__be16)) +  /* IFLA_GENEVE_PORT */
 		nla_total_size(0) +	 /* IFLA_GENEVE_COLLECT_METADATA */
 		0;
 }
@@ -935,7 +931,7 @@
 	    nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
 		goto nla_put_failure;
 
-	if (nla_put_u16(skb, IFLA_GENEVE_PORT, ntohs(geneve->dst_port)))
+	if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port))
 		goto nla_put_failure;
 
 	if (geneve->collect_md) {
@@ -975,7 +971,7 @@
 	if (IS_ERR(dev))
 		return dev;
 
-	err = geneve_configure(net, dev, 0, 0, 0, 0, dst_port, true);
+	err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true);
 	if (err) {
 		free_netdev(dev);
 		return ERR_PTR(err);
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 1dd5ab8..ce5f1a2 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -32,10 +32,18 @@
 	  This driver can also be built as a module. To do so, say M here.
 	  the module will be called 'at86rf230'.
 
+config IEEE802154_AT86RF230_DEBUGFS
+	depends on IEEE802154_AT86RF230
+	bool "AT86RF230 debugfs interface"
+	depends on DEBUG_FS
+	---help---
+	  This option compiles debugfs code for the at86rf230 driver.
+
 config IEEE802154_MRF24J40
 	tristate "Microchip MRF24J40 transceiver driver"
 	depends on IEEE802154_DRIVERS && MAC802154
 	depends on SPI
+	select REGMAP_SPI
 	---help---
 	  Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
 	  controller.
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 6422caa..de6e4fa 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -31,6 +31,7 @@
 #include <linux/skbuff.h>
 #include <linux/of_gpio.h>
 #include <linux/ieee802154.h>
+#include <linux/debugfs.h>
 
 #include <net/mac802154.h>
 #include <net/cfg802154.h>
@@ -80,7 +81,16 @@
 	u8 from_state;
 	u8 to_state;
 
-	bool irq_enable;
+	bool free;
+};
+
+struct at86rf230_trac {
+	u64 success;
+	u64 success_data_pending;
+	u64 success_wait_for_ack;
+	u64 channel_access_failure;
+	u64 no_ack;
+	u64 invalid;
 };
 
 struct at86rf230_local {
@@ -95,14 +105,14 @@
 	struct completion state_complete;
 	struct at86rf230_state_change state;
 
-	struct at86rf230_state_change irq;
-
 	unsigned long cal_timeout;
 	bool is_tx;
 	bool is_tx_from_off;
 	u8 tx_retry;
 	struct sk_buff *tx_skb;
 	struct at86rf230_state_change tx;
+
+	struct at86rf230_trac trac;
 };
 
 #define AT86RF2XX_NUMREGS 0x3F
@@ -110,8 +120,7 @@
 static void
 at86rf230_async_state_change(struct at86rf230_local *lp,
 			     struct at86rf230_state_change *ctx,
-			     const u8 state, void (*complete)(void *context),
-			     const bool irq_enable);
+			     const u8 state, void (*complete)(void *context));
 
 static inline void
 at86rf230_sleep(struct at86rf230_local *lp)
@@ -340,8 +349,10 @@
 	struct at86rf230_local *lp = ctx->lp;
 
 	lp->is_tx = 0;
-	at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL, false);
+	at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL);
 	ieee802154_wake_queue(lp->hw);
+	if (ctx->free)
+		kfree(ctx);
 }
 
 static inline void
@@ -351,15 +362,14 @@
 	dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
 
 	at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
-				     at86rf230_async_error_recover, false);
+				     at86rf230_async_error_recover);
 }
 
 /* Generic function to get some register value in async mode */
 static void
-at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
+at86rf230_async_read_reg(struct at86rf230_local *lp, u8 reg,
 			 struct at86rf230_state_change *ctx,
-			 void (*complete)(void *context),
-			 const bool irq_enable)
+			 void (*complete)(void *context))
 {
 	int rc;
 
@@ -367,22 +377,24 @@
 
 	tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
 	ctx->msg.complete = complete;
-	ctx->irq_enable = irq_enable;
 	rc = spi_async(lp->spi, &ctx->msg);
-	if (rc) {
-		if (irq_enable)
-			enable_irq(ctx->irq);
-
+	if (rc)
 		at86rf230_async_error(lp, ctx, rc);
-	}
 }
 
-static inline u8 at86rf230_state_to_force(u8 state)
+static void
+at86rf230_async_write_reg(struct at86rf230_local *lp, u8 reg, u8 val,
+			  struct at86rf230_state_change *ctx,
+			  void (*complete)(void *context))
 {
-	if (state == STATE_TX_ON)
-		return STATE_FORCE_TX_ON;
-	else
-		return STATE_FORCE_TRX_OFF;
+	int rc;
+
+	ctx->buf[0] = (reg & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
+	ctx->buf[1] = val;
+	ctx->msg.complete = complete;
+	rc = spi_async(lp->spi, &ctx->msg);
+	if (rc)
+		at86rf230_async_error(lp, ctx, rc);
 }
 
 static void
@@ -426,12 +438,11 @@
 				u8 state = ctx->to_state;
 
 				if (lp->tx_retry >= AT86RF2XX_MAX_TX_RETRIES)
-					state = at86rf230_state_to_force(state);
+					state = STATE_FORCE_TRX_OFF;
 				lp->tx_retry++;
 
 				at86rf230_async_state_change(lp, ctx, state,
-							     ctx->complete,
-							     ctx->irq_enable);
+							     ctx->complete);
 				return;
 			}
 		}
@@ -452,8 +463,7 @@
 	struct at86rf230_local *lp = ctx->lp;
 
 	at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
-				 at86rf230_async_state_assert,
-				 ctx->irq_enable);
+				 at86rf230_async_state_assert);
 
 	return HRTIMER_NORESTART;
 }
@@ -558,14 +568,12 @@
 	struct at86rf230_local *lp = ctx->lp;
 	u8 *buf = ctx->buf;
 	const u8 trx_state = buf[1] & TRX_STATE_MASK;
-	int rc;
 
 	/* Check for "possible" STATE_TRANSITION_IN_PROGRESS */
 	if (trx_state == STATE_TRANSITION_IN_PROGRESS) {
 		udelay(1);
 		at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
-					 at86rf230_async_state_change_start,
-					 ctx->irq_enable);
+					 at86rf230_async_state_change_start);
 		return;
 	}
 
@@ -582,31 +590,20 @@
 	/* Going into the next step for a state change which do a timing
 	 * relevant delay.
 	 */
-	buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
-	buf[1] = ctx->to_state;
-	ctx->msg.complete = at86rf230_async_state_delay;
-	rc = spi_async(lp->spi, &ctx->msg);
-	if (rc) {
-		if (ctx->irq_enable)
-			enable_irq(ctx->irq);
-
-		at86rf230_async_error(lp, ctx, rc);
-	}
+	at86rf230_async_write_reg(lp, RG_TRX_STATE, ctx->to_state, ctx,
+				  at86rf230_async_state_delay);
 }
 
 static void
 at86rf230_async_state_change(struct at86rf230_local *lp,
 			     struct at86rf230_state_change *ctx,
-			     const u8 state, void (*complete)(void *context),
-			     const bool irq_enable)
+			     const u8 state, void (*complete)(void *context))
 {
 	/* Initialization for the state change context */
 	ctx->to_state = state;
 	ctx->complete = complete;
-	ctx->irq_enable = irq_enable;
 	at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
-				 at86rf230_async_state_change_start,
-				 irq_enable);
+				 at86rf230_async_state_change_start);
 }
 
 static void
@@ -628,8 +625,7 @@
 	unsigned long rc;
 
 	at86rf230_async_state_change(lp, &lp->state, state,
-				     at86rf230_sync_state_change_complete,
-				     false);
+				     at86rf230_sync_state_change_complete);
 
 	rc = wait_for_completion_timeout(&lp->state_complete,
 					 msecs_to_jiffies(100));
@@ -647,9 +643,8 @@
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
 
-	enable_irq(ctx->irq);
-
 	ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
+	kfree(ctx);
 }
 
 static void
@@ -659,7 +654,7 @@
 	struct at86rf230_local *lp = ctx->lp;
 
 	at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
-				     at86rf230_tx_complete, true);
+				     at86rf230_tx_complete);
 }
 
 static void
@@ -667,28 +662,33 @@
 {
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
-	const u8 *buf = ctx->buf;
-	const u8 trac = (buf[1] & 0xe0) >> 5;
 
-	/* If trac status is different than zero we need to do a state change
-	 * to STATE_FORCE_TRX_OFF then STATE_RX_AACK_ON to recover the
-	 * transceiver.
-	 */
-	if (trac)
-		at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
-					     at86rf230_tx_on, true);
-	else
-		at86rf230_tx_on(context);
-}
+	if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) {
+		u8 trac = TRAC_MASK(ctx->buf[1]);
 
-static void
-at86rf230_tx_trac_status(void *context)
-{
-	struct at86rf230_state_change *ctx = context;
-	struct at86rf230_local *lp = ctx->lp;
+		switch (trac) {
+		case TRAC_SUCCESS:
+			lp->trac.success++;
+			break;
+		case TRAC_SUCCESS_DATA_PENDING:
+			lp->trac.success_data_pending++;
+			break;
+		case TRAC_CHANNEL_ACCESS_FAILURE:
+			lp->trac.channel_access_failure++;
+			break;
+		case TRAC_NO_ACK:
+			lp->trac.no_ack++;
+			break;
+		case TRAC_INVALID:
+			lp->trac.invalid++;
+			break;
+		default:
+			WARN_ONCE(1, "received tx trac status %d\n", trac);
+			break;
+		}
+	}
 
-	at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
-				 at86rf230_tx_trac_check, true);
+	at86rf230_async_state_change(lp, ctx, STATE_TX_ON, at86rf230_tx_on);
 }
 
 static void
@@ -696,7 +696,6 @@
 {
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
-	u8 rx_local_buf[AT86RF2XX_MAX_BUF];
 	const u8 *buf = ctx->buf;
 	struct sk_buff *skb;
 	u8 len, lqi;
@@ -708,63 +707,68 @@
 	}
 	lqi = buf[2 + len];
 
-	memcpy(rx_local_buf, buf + 2, len);
-	ctx->trx.len = 2;
-	enable_irq(ctx->irq);
-
 	skb = dev_alloc_skb(IEEE802154_MTU);
 	if (!skb) {
 		dev_vdbg(&lp->spi->dev, "failed to allocate sk_buff\n");
+		kfree(ctx);
 		return;
 	}
 
-	memcpy(skb_put(skb, len), rx_local_buf, len);
+	memcpy(skb_put(skb, len), buf + 2, len);
 	ieee802154_rx_irqsafe(lp->hw, skb, lqi);
+	kfree(ctx);
 }
 
 static void
-at86rf230_rx_read_frame(void *context)
+at86rf230_rx_trac_check(void *context)
 {
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
 	u8 *buf = ctx->buf;
 	int rc;
 
+	if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) {
+		u8 trac = TRAC_MASK(buf[1]);
+
+		switch (trac) {
+		case TRAC_SUCCESS:
+			lp->trac.success++;
+			break;
+		case TRAC_SUCCESS_WAIT_FOR_ACK:
+			lp->trac.success_wait_for_ack++;
+			break;
+		case TRAC_INVALID:
+			lp->trac.invalid++;
+			break;
+		default:
+			WARN_ONCE(1, "received rx trac status %d\n", trac);
+			break;
+		}
+	}
+
 	buf[0] = CMD_FB;
 	ctx->trx.len = AT86RF2XX_MAX_BUF;
 	ctx->msg.complete = at86rf230_rx_read_frame_complete;
 	rc = spi_async(lp->spi, &ctx->msg);
 	if (rc) {
 		ctx->trx.len = 2;
-		enable_irq(ctx->irq);
 		at86rf230_async_error(lp, ctx, rc);
 	}
 }
 
 static void
-at86rf230_rx_trac_check(void *context)
+at86rf230_irq_trx_end(void *context)
 {
-	/* Possible check on trac status here. This could be useful to make
-	 * some stats why receive is failed. Not used at the moment, but it's
-	 * maybe timing relevant. Datasheet doesn't say anything about this.
-	 * The programming guide say do it so.
-	 */
+	struct at86rf230_state_change *ctx = context;
+	struct at86rf230_local *lp = ctx->lp;
 
-	at86rf230_rx_read_frame(context);
-}
-
-static void
-at86rf230_irq_trx_end(struct at86rf230_local *lp)
-{
 	if (lp->is_tx) {
 		lp->is_tx = 0;
-		at86rf230_async_state_change(lp, &lp->irq,
-					     STATE_FORCE_TX_ON,
-					     at86rf230_tx_trac_status,
-					     true);
+		at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
+					 at86rf230_tx_trac_check);
 	} else {
-		at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
-					 at86rf230_rx_trac_check, true);
+		at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
+					 at86rf230_rx_trac_check);
 	}
 }
 
@@ -774,32 +778,59 @@
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
 	const u8 *buf = ctx->buf;
-	const u8 irq = buf[1];
+	u8 irq = buf[1];
+
+	enable_irq(lp->spi->irq);
 
 	if (irq & IRQ_TRX_END) {
-		at86rf230_irq_trx_end(lp);
+		at86rf230_irq_trx_end(ctx);
 	} else {
-		enable_irq(ctx->irq);
 		dev_err(&lp->spi->dev, "not supported irq %02x received\n",
 			irq);
+		kfree(ctx);
 	}
 }
 
+static void
+at86rf230_setup_spi_messages(struct at86rf230_local *lp,
+			     struct at86rf230_state_change *state)
+{
+	state->lp = lp;
+	state->irq = lp->spi->irq;
+	spi_message_init(&state->msg);
+	state->msg.context = state;
+	state->trx.len = 2;
+	state->trx.tx_buf = state->buf;
+	state->trx.rx_buf = state->buf;
+	spi_message_add_tail(&state->trx, &state->msg);
+	hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	state->timer.function = at86rf230_async_state_timer;
+}
+
 static irqreturn_t at86rf230_isr(int irq, void *data)
 {
 	struct at86rf230_local *lp = data;
-	struct at86rf230_state_change *ctx = &lp->irq;
-	u8 *buf = ctx->buf;
+	struct at86rf230_state_change *ctx;
 	int rc;
 
 	disable_irq_nosync(irq);
 
-	buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
+	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+	if (!ctx) {
+		enable_irq(irq);
+		return IRQ_NONE;
+	}
+
+	at86rf230_setup_spi_messages(lp, ctx);
+	/* tell the error handling path to free ctx */
+	ctx->free = true;
+
+	ctx->buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
 	ctx->msg.complete = at86rf230_irq_status;
 	rc = spi_async(lp->spi, &ctx->msg);
 	if (rc) {
-		enable_irq(irq);
 		at86rf230_async_error(lp, ctx, rc);
+		enable_irq(irq);
 		return IRQ_NONE;
 	}
 
@@ -811,21 +842,14 @@
 {
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
-	u8 *buf = ctx->buf;
-	int rc;
 
 	ctx->trx.len = 2;
 
-	if (gpio_is_valid(lp->slp_tr)) {
+	if (gpio_is_valid(lp->slp_tr))
 		at86rf230_slp_tr_rising_edge(lp);
-	} else {
-		buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
-		buf[1] = STATE_BUSY_TX;
-		ctx->msg.complete = NULL;
-		rc = spi_async(lp->spi, &ctx->msg);
-		if (rc)
-			at86rf230_async_error(lp, ctx, rc);
-	}
+	else
+		at86rf230_async_write_reg(lp, RG_TRX_STATE, STATE_BUSY_TX, ctx,
+					  NULL);
 }
 
 static void
@@ -858,7 +882,7 @@
 	struct at86rf230_local *lp = ctx->lp;
 
 	at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
-				     at86rf230_write_frame, false);
+				     at86rf230_write_frame);
 }
 
 static void
@@ -871,12 +895,10 @@
 	if (lp->is_tx_from_off) {
 		lp->is_tx_from_off = false;
 		at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
-					     at86rf230_write_frame,
-					     false);
+					     at86rf230_write_frame);
 	} else {
 		at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
-					     at86rf230_xmit_tx_on,
-					     false);
+					     at86rf230_xmit_tx_on);
 	}
 }
 
@@ -899,7 +921,7 @@
 	if (time_is_before_jiffies(lp->cal_timeout)) {
 		lp->is_tx_from_off = true;
 		at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF,
-					     at86rf230_xmit_start, false);
+					     at86rf230_xmit_start);
 	} else {
 		at86rf230_xmit_start(ctx);
 	}
@@ -920,6 +942,10 @@
 {
 	struct at86rf230_local *lp = hw->priv;
 
+	/* reset trac stats on start */
+	if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS))
+		memset(&lp->trac, 0, sizeof(struct at86rf230_trac));
+
 	at86rf230_awake(lp);
 	enable_irq(lp->spi->irq);
 
@@ -1354,10 +1380,6 @@
 		return rc;
 
 	irq_type = irq_get_trigger_type(lp->spi->irq);
-	if (irq_type == IRQ_TYPE_EDGE_RISING ||
-	    irq_type == IRQ_TYPE_EDGE_FALLING)
-		dev_warn(&lp->spi->dev,
-			 "Using edge triggered irq's are not recommended!\n");
 	if (irq_type == IRQ_TYPE_EDGE_FALLING ||
 	    irq_type == IRQ_TYPE_LEVEL_LOW)
 		irq_pol = IRQ_ACTIVE_LOW;
@@ -1583,43 +1605,66 @@
 	return rc;
 }
 
-static void
-at86rf230_setup_spi_messages(struct at86rf230_local *lp)
+#ifdef CONFIG_IEEE802154_AT86RF230_DEBUGFS
+static struct dentry *at86rf230_debugfs_root;
+
+static int at86rf230_stats_show(struct seq_file *file, void *offset)
 {
-	lp->state.lp = lp;
-	lp->state.irq = lp->spi->irq;
-	spi_message_init(&lp->state.msg);
-	lp->state.msg.context = &lp->state;
-	lp->state.trx.len = 2;
-	lp->state.trx.tx_buf = lp->state.buf;
-	lp->state.trx.rx_buf = lp->state.buf;
-	spi_message_add_tail(&lp->state.trx, &lp->state.msg);
-	hrtimer_init(&lp->state.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	lp->state.timer.function = at86rf230_async_state_timer;
+	struct at86rf230_local *lp = file->private;
 
-	lp->irq.lp = lp;
-	lp->irq.irq = lp->spi->irq;
-	spi_message_init(&lp->irq.msg);
-	lp->irq.msg.context = &lp->irq;
-	lp->irq.trx.len = 2;
-	lp->irq.trx.tx_buf = lp->irq.buf;
-	lp->irq.trx.rx_buf = lp->irq.buf;
-	spi_message_add_tail(&lp->irq.trx, &lp->irq.msg);
-	hrtimer_init(&lp->irq.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	lp->irq.timer.function = at86rf230_async_state_timer;
-
-	lp->tx.lp = lp;
-	lp->tx.irq = lp->spi->irq;
-	spi_message_init(&lp->tx.msg);
-	lp->tx.msg.context = &lp->tx;
-	lp->tx.trx.len = 2;
-	lp->tx.trx.tx_buf = lp->tx.buf;
-	lp->tx.trx.rx_buf = lp->tx.buf;
-	spi_message_add_tail(&lp->tx.trx, &lp->tx.msg);
-	hrtimer_init(&lp->tx.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	lp->tx.timer.function = at86rf230_async_state_timer;
+	seq_printf(file, "SUCCESS:\t\t%8llu\n", lp->trac.success);
+	seq_printf(file, "SUCCESS_DATA_PENDING:\t%8llu\n",
+		   lp->trac.success_data_pending);
+	seq_printf(file, "SUCCESS_WAIT_FOR_ACK:\t%8llu\n",
+		   lp->trac.success_wait_for_ack);
+	seq_printf(file, "CHANNEL_ACCESS_FAILURE:\t%8llu\n",
+		   lp->trac.channel_access_failure);
+	seq_printf(file, "NO_ACK:\t\t\t%8llu\n", lp->trac.no_ack);
+	seq_printf(file, "INVALID:\t\t%8llu\n", lp->trac.invalid);
+	return 0;
 }
 
+static int at86rf230_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, at86rf230_stats_show, inode->i_private);
+}
+
+static const struct file_operations at86rf230_stats_fops = {
+	.open		= at86rf230_stats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int at86rf230_debugfs_init(struct at86rf230_local *lp)
+{
+	char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "at86rf230-";
+	struct dentry *stats;
+
+	strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
+
+	at86rf230_debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+	if (!at86rf230_debugfs_root)
+		return -ENOMEM;
+
+	stats = debugfs_create_file("trac_stats", S_IRUGO,
+				    at86rf230_debugfs_root, lp,
+				    &at86rf230_stats_fops);
+	if (!stats)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void at86rf230_debugfs_remove(void)
+{
+	debugfs_remove_recursive(at86rf230_debugfs_root);
+}
+#else
+static int at86rf230_debugfs_init(struct at86rf230_local *lp) { return 0; }
+static void at86rf230_debugfs_remove(void) { }
+#endif
+
 static int at86rf230_probe(struct spi_device *spi)
 {
 	struct ieee802154_hw *hw;
@@ -1681,7 +1726,8 @@
 		goto free_dev;
 	}
 
-	at86rf230_setup_spi_messages(lp);
+	at86rf230_setup_spi_messages(lp, &lp->state);
+	at86rf230_setup_spi_messages(lp, &lp->tx);
 
 	rc = at86rf230_detect_device(lp);
 	if (rc < 0)
@@ -1715,12 +1761,18 @@
 	/* going into sleep by default */
 	at86rf230_sleep(lp);
 
-	rc = ieee802154_register_hw(lp->hw);
+	rc = at86rf230_debugfs_init(lp);
 	if (rc)
 		goto free_dev;
 
+	rc = ieee802154_register_hw(lp->hw);
+	if (rc)
+		goto free_debugfs;
+
 	return rc;
 
+free_debugfs:
+	at86rf230_debugfs_remove();
 free_dev:
 	ieee802154_free_hw(lp->hw);
 
@@ -1735,6 +1787,7 @@
 	at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
 	ieee802154_unregister_hw(lp->hw);
 	ieee802154_free_hw(lp->hw);
+	at86rf230_debugfs_remove();
 	dev_dbg(&spi->dev, "unregistered at86rf230\n");
 
 	return 0;
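
The at86rf230.c interrupt rework drops the single static lp->irq context in favour of a context allocated per interrupt with GFP_ATOMIC and released by whichever rx/tx completion handler finishes with it, signalled through the new ctx->free flag. A hedged plain-C sketch of that ownership pattern, with malloc/free standing in for the kernel's atomic allocation:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct state_change {
	bool free;		/* completion path must free this context */
	unsigned char buf[4];
};

/* "interrupt": allocate a fresh context for this event */
static struct state_change *isr_alloc_ctx(void)
{
	struct state_change *ctx = calloc(1, sizeof(*ctx));

	if (ctx)
		ctx->free = true;	/* tell the completion path to free it */
	return ctx;
}

/* "completion": the last user of the context releases it */
static void rx_complete(struct state_change *ctx)
{
	printf("processed frame, releasing context\n");
	if (ctx->free)
		free(ctx);
}

int main(void)
{
	struct state_change *ctx = isr_alloc_ctx();

	if (ctx)
		rx_complete(ctx);
	return 0;
}
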
diff --git a/drivers/net/ieee802154/at86rf230.h b/drivers/net/ieee802154/at86rf230.h
index 1e6d1cc..fd9c1f4 100644
--- a/drivers/net/ieee802154/at86rf230.h
+++ b/drivers/net/ieee802154/at86rf230.h
@@ -216,5 +216,13 @@
 #define STATE_TRANSITION_IN_PROGRESS 0x1F
 
 #define TRX_STATE_MASK		(0x1F)
+#define TRAC_MASK(x)		((x & 0xe0) >> 5)
+
+#define TRAC_SUCCESS			0
+#define TRAC_SUCCESS_DATA_PENDING	1
+#define TRAC_SUCCESS_WAIT_FOR_ACK	2
+#define TRAC_CHANNEL_ACCESS_FAILURE	3
+#define TRAC_NO_ACK			5
+#define TRAC_INVALID			7
 
 #endif /* !_AT86RF230_H */
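
TRAC_MASK() added above extracts the 3-bit TRAC status from bits 7:5 of the TRX_STATE read-back, which is what the new debugfs counters key off. A quick self-contained check of that bit extraction; the register value below is made up for the demo:

#include <stdint.h>
#include <stdio.h>

#define TRAC_MASK(x)	(((x) & 0xe0) >> 5)

#define TRAC_SUCCESS	0
#define TRAC_NO_ACK	5

int main(void)
{
	/* made-up TRX_STATE read-back: TRAC in bits 7:5, state in bits 4:0 */
	uint8_t regval = (TRAC_NO_ACK << 5) | 0x16;

	if (TRAC_MASK(regval) == TRAC_NO_ACK)
		printf("frame was not acknowledged\n");
	return 0;
}
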
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 80dfc72..199a94a 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -559,6 +559,7 @@
 {
 	struct usb_device *usb_dev = atusb->usb_dev;
 	uint8_t man_id_0, man_id_1, part_num, version_num;
+	const char *chip;
 
 	man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0);
 	man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1);
@@ -574,14 +575,22 @@
 			man_id_1, man_id_0);
 		goto fail;
 	}
-	if (part_num != 3 && part_num != 2) {
+
+	switch (part_num) {
+	case 2:
+		chip = "AT86RF230";
+		break;
+	case 3:
+		chip = "AT86RF231";
+		break;
+	default:
 		dev_err(&usb_dev->dev,
 			"unexpected transceiver, part 0x%02x version 0x%02x\n",
 			part_num, version_num);
 		goto fail;
 	}
 
-	dev_info(&usb_dev->dev, "ATUSB: AT86RF231 version %d\n", version_num);
+	dev_info(&usb_dev->dev, "ATUSB: %s version %d\n", chip, version_num);
 
 	return 0;
 
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 997724b..aca0fb3 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -18,51 +18,172 @@
 #include <linux/spi/spi.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/regmap.h>
 #include <linux/ieee802154.h>
+#include <linux/irq.h>
 #include <net/cfg802154.h>
 #include <net/mac802154.h>
 
 /* MRF24J40 Short Address Registers */
-#define REG_RXMCR    0x00  /* Receive MAC control */
-#define REG_PANIDL   0x01  /* PAN ID (low) */
-#define REG_PANIDH   0x02  /* PAN ID (high) */
-#define REG_SADRL    0x03  /* Short address (low) */
-#define REG_SADRH    0x04  /* Short address (high) */
-#define REG_EADR0    0x05  /* Long address (low) (high is EADR7) */
-#define REG_TXMCR    0x11  /* Transmit MAC control */
-#define REG_PACON0   0x16  /* Power Amplifier Control */
-#define REG_PACON1   0x17  /* Power Amplifier Control */
-#define REG_PACON2   0x18  /* Power Amplifier Control */
-#define REG_TXNCON   0x1B  /* Transmit Normal FIFO Control */
-#define REG_TXSTAT   0x24  /* TX MAC Status Register */
-#define REG_SOFTRST  0x2A  /* Soft Reset */
-#define REG_TXSTBL   0x2E  /* TX Stabilization */
-#define REG_INTSTAT  0x31  /* Interrupt Status */
-#define REG_INTCON   0x32  /* Interrupt Control */
-#define REG_GPIO     0x33  /* GPIO */
-#define REG_TRISGPIO 0x34  /* GPIO direction */
-#define REG_RFCTL    0x36  /* RF Control Mode Register */
-#define REG_BBREG1   0x39  /* Baseband Registers */
-#define REG_BBREG2   0x3A  /* */
-#define REG_BBREG6   0x3E  /* */
-#define REG_CCAEDTH  0x3F  /* Energy Detection Threshold */
+#define REG_RXMCR	0x00  /* Receive MAC control */
+#define BIT_PROMI	BIT(0)
+#define BIT_ERRPKT	BIT(1)
+#define BIT_NOACKRSP	BIT(5)
+#define BIT_PANCOORD	BIT(3)
+
+#define REG_PANIDL	0x01  /* PAN ID (low) */
+#define REG_PANIDH	0x02  /* PAN ID (high) */
+#define REG_SADRL	0x03  /* Short address (low) */
+#define REG_SADRH	0x04  /* Short address (high) */
+#define REG_EADR0	0x05  /* Long address (low) (high is EADR7) */
+#define REG_EADR1	0x06
+#define REG_EADR2	0x07
+#define REG_EADR3	0x08
+#define REG_EADR4	0x09
+#define REG_EADR5	0x0A
+#define REG_EADR6	0x0B
+#define REG_EADR7	0x0C
+#define REG_RXFLUSH	0x0D
+#define REG_ORDER	0x10
+#define REG_TXMCR	0x11  /* Transmit MAC control */
+#define TXMCR_MIN_BE_SHIFT		3
+#define TXMCR_MIN_BE_MASK		0x18
+#define TXMCR_CSMA_RETRIES_SHIFT	0
+#define TXMCR_CSMA_RETRIES_MASK		0x07
+
+#define REG_ACKTMOUT	0x12
+#define REG_ESLOTG1	0x13
+#define REG_SYMTICKL	0x14
+#define REG_SYMTICKH	0x15
+#define REG_PACON0	0x16  /* Power Amplifier Control */
+#define REG_PACON1	0x17  /* Power Amplifier Control */
+#define REG_PACON2	0x18  /* Power Amplifier Control */
+#define REG_TXBCON0	0x1A
+#define REG_TXNCON	0x1B  /* Transmit Normal FIFO Control */
+#define BIT_TXNTRIG	BIT(0)
+#define BIT_TXNACKREQ	BIT(2)
+
+#define REG_TXG1CON	0x1C
+#define REG_TXG2CON	0x1D
+#define REG_ESLOTG23	0x1E
+#define REG_ESLOTG45	0x1F
+#define REG_ESLOTG67	0x20
+#define REG_TXPEND	0x21
+#define REG_WAKECON	0x22
+#define REG_FROMOFFSET	0x23
+#define REG_TXSTAT	0x24  /* TX MAC Status Register */
+#define REG_TXBCON1	0x25
+#define REG_GATECLK	0x26
+#define REG_TXTIME	0x27
+#define REG_HSYMTMRL	0x28
+#define REG_HSYMTMRH	0x29
+#define REG_SOFTRST	0x2A  /* Soft Reset */
+#define REG_SECCON0	0x2C
+#define REG_SECCON1	0x2D
+#define REG_TXSTBL	0x2E  /* TX Stabilization */
+#define REG_RXSR	0x30
+#define REG_INTSTAT	0x31  /* Interrupt Status */
+#define BIT_TXNIF	BIT(0)
+#define BIT_RXIF	BIT(3)
+
+#define REG_INTCON	0x32  /* Interrupt Control */
+#define BIT_TXNIE	BIT(0)
+#define BIT_RXIE	BIT(3)
+
+#define REG_GPIO	0x33  /* GPIO */
+#define REG_TRISGPIO	0x34  /* GPIO direction */
+#define REG_SLPACK	0x35
+#define REG_RFCTL	0x36  /* RF Control Mode Register */
+#define BIT_RFRST	BIT(2)
+
+#define REG_SECCR2	0x37
+#define REG_BBREG0	0x38
+#define REG_BBREG1	0x39  /* Baseband Registers */
+#define BIT_RXDECINV	BIT(2)
+
+#define REG_BBREG2	0x3A  /* */
+#define BBREG2_CCA_MODE_SHIFT	6
+#define BBREG2_CCA_MODE_MASK	0xc0
+
+#define REG_BBREG3	0x3B
+#define REG_BBREG4	0x3C
+#define REG_BBREG6	0x3E  /* */
+#define REG_CCAEDTH	0x3F  /* Energy Detection Threshold */
 
 /* MRF24J40 Long Address Registers */
-#define REG_RFCON0     0x200  /* RF Control Registers */
-#define REG_RFCON1     0x201
-#define REG_RFCON2     0x202
-#define REG_RFCON3     0x203
-#define REG_RFCON5     0x205
-#define REG_RFCON6     0x206
-#define REG_RFCON7     0x207
-#define REG_RFCON8     0x208
-#define REG_RSSI       0x210
-#define REG_SLPCON0    0x211  /* Sleep Clock Control Registers */
-#define REG_SLPCON1    0x220
-#define REG_WAKETIMEL  0x222  /* Wake-up Time Match Value Low */
-#define REG_WAKETIMEH  0x223  /* Wake-up Time Match Value High */
-#define REG_TESTMODE   0x22F  /* Test mode */
-#define REG_RX_FIFO    0x300  /* Receive FIFO */
+#define REG_RFCON0	0x200  /* RF Control Registers */
+#define RFCON0_CH_SHIFT	4
+#define RFCON0_CH_MASK	0xf0
+#define RFOPT_RECOMMEND	3
+
+#define REG_RFCON1	0x201
+#define REG_RFCON2	0x202
+#define REG_RFCON3	0x203
+
+#define TXPWRL_MASK	0xc0
+#define TXPWRL_SHIFT	6
+#define TXPWRL_30	0x3
+#define TXPWRL_20	0x2
+#define TXPWRL_10	0x1
+#define TXPWRL_0	0x0
+
+#define TXPWRS_MASK	0x38
+#define TXPWRS_SHIFT	3
+#define TXPWRS_6_3	0x7
+#define TXPWRS_4_9	0x6
+#define TXPWRS_3_7	0x5
+#define TXPWRS_2_8	0x4
+#define TXPWRS_1_9	0x3
+#define TXPWRS_1_2	0x2
+#define TXPWRS_0_5	0x1
+#define TXPWRS_0	0x0
+
+#define REG_RFCON5	0x205
+#define REG_RFCON6	0x206
+#define REG_RFCON7	0x207
+#define REG_RFCON8	0x208
+#define REG_SLPCAL0	0x209
+#define REG_SLPCAL1	0x20A
+#define REG_SLPCAL2	0x20B
+#define REG_RFSTATE	0x20F
+#define REG_RSSI	0x210
+#define REG_SLPCON0	0x211  /* Sleep Clock Control Registers */
+#define BIT_INTEDGE	BIT(1)
+
+#define REG_SLPCON1	0x220
+#define REG_WAKETIMEL	0x222  /* Wake-up Time Match Value Low */
+#define REG_WAKETIMEH	0x223  /* Wake-up Time Match Value High */
+#define REG_REMCNTL	0x224
+#define REG_REMCNTH	0x225
+#define REG_MAINCNT0	0x226
+#define REG_MAINCNT1	0x227
+#define REG_MAINCNT2	0x228
+#define REG_MAINCNT3	0x229
+#define REG_TESTMODE	0x22F  /* Test mode */
+#define REG_ASSOEAR0	0x230
+#define REG_ASSOEAR1	0x231
+#define REG_ASSOEAR2	0x232
+#define REG_ASSOEAR3	0x233
+#define REG_ASSOEAR4	0x234
+#define REG_ASSOEAR5	0x235
+#define REG_ASSOEAR6	0x236
+#define REG_ASSOEAR7	0x237
+#define REG_ASSOSAR0	0x238
+#define REG_ASSOSAR1	0x239
+#define REG_UNONCE0	0x240
+#define REG_UNONCE1	0x241
+#define REG_UNONCE2	0x242
+#define REG_UNONCE3	0x243
+#define REG_UNONCE4	0x244
+#define REG_UNONCE5	0x245
+#define REG_UNONCE6	0x246
+#define REG_UNONCE7	0x247
+#define REG_UNONCE8	0x248
+#define REG_UNONCE9	0x249
+#define REG_UNONCE10	0x24A
+#define REG_UNONCE11	0x24B
+#define REG_UNONCE12	0x24C
+#define REG_RX_FIFO	0x300  /* Receive FIFO */
 
 /* Device configuration: Only channels 11-26 on page 0 are supported. */
 #define MRF24J40_CHAN_MIN 11
@@ -81,11 +202,52 @@
 	struct spi_device *spi;
 	struct ieee802154_hw *hw;
 
-	struct mutex buffer_mutex; /* only used to protect buf */
-	struct completion tx_complete;
-	u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
+	struct regmap *regmap_short;
+	struct regmap *regmap_long;
+
+	/* for writing txfifo */
+	struct spi_message tx_msg;
+	u8 tx_hdr_buf[2];
+	struct spi_transfer tx_hdr_trx;
+	u8 tx_len_buf[2];
+	struct spi_transfer tx_len_trx;
+	struct spi_transfer tx_buf_trx;
+	struct sk_buff *tx_skb;
+
+	/* post transmit message to send frame out  */
+	struct spi_message tx_post_msg;
+	u8 tx_post_buf[2];
+	struct spi_transfer tx_post_trx;
+
+	/* for protect/unprotect/read length rxfifo */
+	struct spi_message rx_msg;
+	u8 rx_buf[3];
+	struct spi_transfer rx_trx;
+
+	/* receive handling */
+	struct spi_message rx_buf_msg;
+	u8 rx_addr_buf[2];
+	struct spi_transfer rx_addr_trx;
+	u8 rx_lqi_buf[2];
+	struct spi_transfer rx_lqi_trx;
+	u8 rx_fifo_buf[RX_FIFO_SIZE];
+	struct spi_transfer rx_fifo_buf_trx;
+
+	/* isr handling for reading intstat */
+	struct spi_message irq_msg;
+	u8 irq_buf[2];
+	struct spi_transfer irq_trx;
 };
 
+/* regmap information for short address register access */
+#define MRF24J40_SHORT_WRITE	0x01
+#define MRF24J40_SHORT_READ	0x00
+#define MRF24J40_SHORT_NUMREGS	0x3F
+
+/* regmap information for long address register access */
+#define MRF24J40_LONG_ACCESS	0x80
+#define MRF24J40_LONG_NUMREGS	0x38F
+
 /* Read/Write SPI Commands for Short and Long Address registers. */
 #define MRF24J40_READSHORT(reg) ((reg) << 1)
 #define MRF24J40_WRITESHORT(reg) ((reg) << 1 | 1)
@@ -97,118 +259,304 @@
 
 #define printdev(X) (&X->spi->dev)
 
-static int write_short_reg(struct mrf24j40 *devrec, u8 reg, u8 value)
+static bool
+mrf24j40_short_reg_writeable(struct device *dev, unsigned int reg)
 {
-	int ret;
-	struct spi_message msg;
-	struct spi_transfer xfer = {
-		.len = 2,
-		.tx_buf = devrec->buf,
-		.rx_buf = devrec->buf,
-	};
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
-
-	mutex_lock(&devrec->buffer_mutex);
-	devrec->buf[0] = MRF24J40_WRITESHORT(reg);
-	devrec->buf[1] = value;
-
-	ret = spi_sync(devrec->spi, &msg);
-	if (ret)
-		dev_err(printdev(devrec),
-			"SPI write Failed for short register 0x%hhx\n", reg);
-
-	mutex_unlock(&devrec->buffer_mutex);
-	return ret;
+	switch (reg) {
+	case REG_RXMCR:
+	case REG_PANIDL:
+	case REG_PANIDH:
+	case REG_SADRL:
+	case REG_SADRH:
+	case REG_EADR0:
+	case REG_EADR1:
+	case REG_EADR2:
+	case REG_EADR3:
+	case REG_EADR4:
+	case REG_EADR5:
+	case REG_EADR6:
+	case REG_EADR7:
+	case REG_RXFLUSH:
+	case REG_ORDER:
+	case REG_TXMCR:
+	case REG_ACKTMOUT:
+	case REG_ESLOTG1:
+	case REG_SYMTICKL:
+	case REG_SYMTICKH:
+	case REG_PACON0:
+	case REG_PACON1:
+	case REG_PACON2:
+	case REG_TXBCON0:
+	case REG_TXNCON:
+	case REG_TXG1CON:
+	case REG_TXG2CON:
+	case REG_ESLOTG23:
+	case REG_ESLOTG45:
+	case REG_ESLOTG67:
+	case REG_TXPEND:
+	case REG_WAKECON:
+	case REG_FROMOFFSET:
+	case REG_TXBCON1:
+	case REG_GATECLK:
+	case REG_TXTIME:
+	case REG_HSYMTMRL:
+	case REG_HSYMTMRH:
+	case REG_SOFTRST:
+	case REG_SECCON0:
+	case REG_SECCON1:
+	case REG_TXSTBL:
+	case REG_RXSR:
+	case REG_INTCON:
+	case REG_TRISGPIO:
+	case REG_GPIO:
+	case REG_RFCTL:
+	case REG_SLPACK:
+	case REG_BBREG0:
+	case REG_BBREG1:
+	case REG_BBREG2:
+	case REG_BBREG3:
+	case REG_BBREG4:
+	case REG_BBREG6:
+	case REG_CCAEDTH:
+		return true;
+	default:
+		return false;
+	}
 }
 
-static int read_short_reg(struct mrf24j40 *devrec, u8 reg, u8 *val)
+static bool
+mrf24j40_short_reg_readable(struct device *dev, unsigned int reg)
 {
-	int ret = -1;
-	struct spi_message msg;
-	struct spi_transfer xfer = {
-		.len = 2,
-		.tx_buf = devrec->buf,
-		.rx_buf = devrec->buf,
-	};
+	bool rc;
 
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
+	/* all writeable are also readable */
+	rc = mrf24j40_short_reg_writeable(dev, reg);
+	if (rc)
+		return rc;
 
-	mutex_lock(&devrec->buffer_mutex);
-	devrec->buf[0] = MRF24J40_READSHORT(reg);
-	devrec->buf[1] = 0;
-
-	ret = spi_sync(devrec->spi, &msg);
-	if (ret)
-		dev_err(printdev(devrec),
-			"SPI read Failed for short register 0x%hhx\n", reg);
-	else
-		*val = devrec->buf[1];
-
-	mutex_unlock(&devrec->buffer_mutex);
-	return ret;
+	/* readonly regs */
+	switch (reg) {
+	case REG_TXSTAT:
+	case REG_INTSTAT:
+		return true;
+	default:
+		return false;
+	}
 }
 
-static int read_long_reg(struct mrf24j40 *devrec, u16 reg, u8 *value)
+static bool
+mrf24j40_short_reg_volatile(struct device *dev, unsigned int reg)
 {
-	int ret;
-	u16 cmd;
-	struct spi_message msg;
-	struct spi_transfer xfer = {
-		.len = 3,
-		.tx_buf = devrec->buf,
-		.rx_buf = devrec->buf,
-	};
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
-
-	cmd = MRF24J40_READLONG(reg);
-	mutex_lock(&devrec->buffer_mutex);
-	devrec->buf[0] = cmd >> 8 & 0xff;
-	devrec->buf[1] = cmd & 0xff;
-	devrec->buf[2] = 0;
-
-	ret = spi_sync(devrec->spi, &msg);
-	if (ret)
-		dev_err(printdev(devrec),
-			"SPI read Failed for long register 0x%hx\n", reg);
-	else
-		*value = devrec->buf[2];
-
-	mutex_unlock(&devrec->buffer_mutex);
-	return ret;
+	/* can be changed during runtime */
+	switch (reg) {
+	case REG_TXSTAT:
+	case REG_INTSTAT:
+	case REG_RXFLUSH:
+	case REG_TXNCON:
+	case REG_SOFTRST:
+	case REG_RFCTL:
+	case REG_TXBCON0:
+	case REG_TXG1CON:
+	case REG_TXG2CON:
+	case REG_TXBCON1:
+	case REG_SECCON0:
+	case REG_RXSR:
+	case REG_SLPACK:
+	case REG_SECCR2:
+	case REG_BBREG6:
+	/* written both via spi_async and via regmap, so it must be volatile */
+	case REG_BBREG1:
+		return true;
+	default:
+		return false;
+	}
 }
 
-static int write_long_reg(struct mrf24j40 *devrec, u16 reg, u8 val)
+static bool
+mrf24j40_short_reg_precious(struct device *dev, unsigned int reg)
 {
+	/* don't clear irq line on read */
+	switch (reg) {
+	case REG_INTSTAT:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config mrf24j40_short_regmap = {
+	.name = "mrf24j40_short",
+	.reg_bits = 7,
+	.val_bits = 8,
+	.pad_bits = 1,
+	.write_flag_mask = MRF24J40_SHORT_WRITE,
+	.read_flag_mask = MRF24J40_SHORT_READ,
+	.cache_type = REGCACHE_RBTREE,
+	.max_register = MRF24J40_SHORT_NUMREGS,
+	.writeable_reg = mrf24j40_short_reg_writeable,
+	.readable_reg = mrf24j40_short_reg_readable,
+	.volatile_reg = mrf24j40_short_reg_volatile,
+	.precious_reg = mrf24j40_short_reg_precious,
+};
+
+static bool
+mrf24j40_long_reg_writeable(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case REG_RFCON0:
+	case REG_RFCON1:
+	case REG_RFCON2:
+	case REG_RFCON3:
+	case REG_RFCON5:
+	case REG_RFCON6:
+	case REG_RFCON7:
+	case REG_RFCON8:
+	case REG_SLPCAL2:
+	case REG_SLPCON0:
+	case REG_SLPCON1:
+	case REG_WAKETIMEL:
+	case REG_WAKETIMEH:
+	case REG_REMCNTL:
+	case REG_REMCNTH:
+	case REG_MAINCNT0:
+	case REG_MAINCNT1:
+	case REG_MAINCNT2:
+	case REG_MAINCNT3:
+	case REG_TESTMODE:
+	case REG_ASSOEAR0:
+	case REG_ASSOEAR1:
+	case REG_ASSOEAR2:
+	case REG_ASSOEAR3:
+	case REG_ASSOEAR4:
+	case REG_ASSOEAR5:
+	case REG_ASSOEAR6:
+	case REG_ASSOEAR7:
+	case REG_ASSOSAR0:
+	case REG_ASSOSAR1:
+	case REG_UNONCE0:
+	case REG_UNONCE1:
+	case REG_UNONCE2:
+	case REG_UNONCE3:
+	case REG_UNONCE4:
+	case REG_UNONCE5:
+	case REG_UNONCE6:
+	case REG_UNONCE7:
+	case REG_UNONCE8:
+	case REG_UNONCE9:
+	case REG_UNONCE10:
+	case REG_UNONCE11:
+	case REG_UNONCE12:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool
+mrf24j40_long_reg_readable(struct device *dev, unsigned int reg)
+{
+	bool rc;
+
+	/* all writeable are also readable */
+	rc = mrf24j40_long_reg_writeable(dev, reg);
+	if (rc)
+		return rc;
+
+	/* readonly regs */
+	switch (reg) {
+	case REG_SLPCAL0:
+	case REG_SLPCAL1:
+	case REG_RFSTATE:
+	case REG_RSSI:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool
+mrf24j40_long_reg_volatile(struct device *dev, unsigned int reg)
+{
+	/* can be changed during runtime */
+	switch (reg) {
+	case REG_SLPCAL0:
+	case REG_SLPCAL1:
+	case REG_SLPCAL2:
+	case REG_RFSTATE:
+	case REG_RSSI:
+	case REG_MAINCNT3:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config mrf24j40_long_regmap = {
+	.name = "mrf24j40_long",
+	.reg_bits = 11,
+	.val_bits = 8,
+	.pad_bits = 5,
+	.write_flag_mask = MRF24J40_LONG_ACCESS,
+	.read_flag_mask = MRF24J40_LONG_ACCESS,
+	.cache_type = REGCACHE_RBTREE,
+	.max_register = MRF24J40_LONG_NUMREGS,
+	.writeable_reg = mrf24j40_long_reg_writeable,
+	.readable_reg = mrf24j40_long_reg_readable,
+	.volatile_reg = mrf24j40_long_reg_volatile,
+};
+
+static int mrf24j40_long_regmap_write(void *context, const void *data,
+				      size_t count)
+{
+	struct spi_device *spi = context;
+	u8 buf[3];
+
+	if (count > 3)
+		return -EINVAL;
+
+	/* regmap supports a read/write flag mask only in the first byte;
+	 * long write access needs to set the 12th bit, so writes get
+	 * special handling here.
+	 */
+	memcpy(buf, data, count);
+	buf[1] |= (1 << 4);
+
+	return spi_write(spi, buf, count);
+}
+
+static int
+mrf24j40_long_regmap_read(void *context, const void *reg, size_t reg_size,
+			  void *val, size_t val_size)
+{
+	struct spi_device *spi = context;
+
+	return spi_write_then_read(spi, reg, reg_size, val, val_size);
+}
+
+static const struct regmap_bus mrf24j40_long_regmap_bus = {
+	.write = mrf24j40_long_regmap_write,
+	.read = mrf24j40_long_regmap_read,
+	.reg_format_endian_default = REGMAP_ENDIAN_BIG,
+	.val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static void write_tx_buf_complete(void *context)
+{
+	struct mrf24j40 *devrec = context;
+	__le16 fc = ieee802154_get_fc_from_skb(devrec->tx_skb);
+	u8 val = BIT_TXNTRIG;
 	int ret;
-	u16 cmd;
-	struct spi_message msg;
-	struct spi_transfer xfer = {
-		.len = 3,
-		.tx_buf = devrec->buf,
-		.rx_buf = devrec->buf,
-	};
 
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
+	if (ieee802154_is_ackreq(fc))
+		val |= BIT_TXNACKREQ;
 
-	cmd = MRF24J40_WRITELONG(reg);
-	mutex_lock(&devrec->buffer_mutex);
-	devrec->buf[0] = cmd >> 8 & 0xff;
-	devrec->buf[1] = cmd & 0xff;
-	devrec->buf[2] = val;
+	devrec->tx_post_msg.complete = NULL;
+	devrec->tx_post_buf[0] = MRF24J40_WRITESHORT(REG_TXNCON);
+	devrec->tx_post_buf[1] = val;
 
-	ret = spi_sync(devrec->spi, &msg);
+	ret = spi_async(devrec->spi, &devrec->tx_post_msg);
 	if (ret)
-		dev_err(printdev(devrec),
-			"SPI write Failed for long register 0x%hx\n", reg);
-
-	mutex_unlock(&devrec->buffer_mutex);
-	return ret;
+		dev_err(printdev(devrec), "SPI write Failed for transmit buf\n");
 }
 
 /* This function relies on an undocumented write method. Once a write command
@@ -217,22 +565,8 @@
 static int write_tx_buf(struct mrf24j40 *devrec, u16 reg,
 			const u8 *data, size_t length)
 {
-	int ret;
 	u16 cmd;
-	u8 lengths[2];
-	struct spi_message msg;
-	struct spi_transfer addr_xfer = {
-		.len = 2,
-		.tx_buf = devrec->buf,
-	};
-	struct spi_transfer lengths_xfer = {
-		.len = 2,
-		.tx_buf = &lengths, /* TODO: Is DMA really required for SPI? */
-	};
-	struct spi_transfer data_xfer = {
-		.len = length,
-		.tx_buf = data,
-	};
+	int ret;
 
 	/* Range check the length. 2 bytes are used for the length fields.*/
 	if (length > TX_FIFO_SIZE-2) {
@@ -240,147 +574,29 @@
 		length = TX_FIFO_SIZE-2;
 	}
 
-	spi_message_init(&msg);
-	spi_message_add_tail(&addr_xfer, &msg);
-	spi_message_add_tail(&lengths_xfer, &msg);
-	spi_message_add_tail(&data_xfer, &msg);
-
 	cmd = MRF24J40_WRITELONG(reg);
-	mutex_lock(&devrec->buffer_mutex);
-	devrec->buf[0] = cmd >> 8 & 0xff;
-	devrec->buf[1] = cmd & 0xff;
-	lengths[0] = 0x0; /* Header Length. Set to 0 for now. TODO */
-	lengths[1] = length; /* Total length */
+	devrec->tx_hdr_buf[0] = cmd >> 8 & 0xff;
+	devrec->tx_hdr_buf[1] = cmd & 0xff;
+	devrec->tx_len_buf[0] = 0x0; /* Header Length. Set to 0 for now. TODO */
+	devrec->tx_len_buf[1] = length; /* Total length */
+	devrec->tx_buf_trx.tx_buf = data;
+	devrec->tx_buf_trx.len = length;
 
-	ret = spi_sync(devrec->spi, &msg);
+	ret = spi_async(devrec->spi, &devrec->tx_msg);
 	if (ret)
 		dev_err(printdev(devrec), "SPI write Failed for TX buf\n");
 
-	mutex_unlock(&devrec->buffer_mutex);
-	return ret;
-}
-
-static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
-				u8 *data, u8 *len, u8 *lqi)
-{
-	u8 rx_len;
-	u8 addr[2];
-	u8 lqi_rssi[2];
-	u16 cmd;
-	int ret;
-	struct spi_message msg;
-	struct spi_transfer addr_xfer = {
-		.len = 2,
-		.tx_buf = &addr,
-	};
-	struct spi_transfer data_xfer = {
-		.len = 0x0, /* set below */
-		.rx_buf = data,
-	};
-	struct spi_transfer status_xfer = {
-		.len = 2,
-		.rx_buf = &lqi_rssi,
-	};
-
-	/* Get the length of the data in the RX FIFO. The length in this
-	 * register exclues the 1-byte length field at the beginning. */
-	ret = read_long_reg(devrec, REG_RX_FIFO, &rx_len);
-	if (ret)
-		goto out;
-
-	/* Range check the RX FIFO length, accounting for the one-byte
-	 * length field at the beginning. */
-	if (rx_len > RX_FIFO_SIZE-1) {
-		dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n");
-		rx_len = RX_FIFO_SIZE-1;
-	}
-
-	if (rx_len > *len) {
-		/* Passed in buffer wasn't big enough. Should never happen. */
-		dev_err(printdev(devrec), "Buffer not big enough. Performing short read\n");
-		rx_len = *len;
-	}
-
-	/* Set up the commands to read the data. */
-	cmd = MRF24J40_READLONG(REG_RX_FIFO+1);
-	addr[0] = cmd >> 8 & 0xff;
-	addr[1] = cmd & 0xff;
-	data_xfer.len = rx_len;
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&addr_xfer, &msg);
-	spi_message_add_tail(&data_xfer, &msg);
-	spi_message_add_tail(&status_xfer, &msg);
-
-	ret = spi_sync(devrec->spi, &msg);
-	if (ret) {
-		dev_err(printdev(devrec), "SPI RX Buffer Read Failed.\n");
-		goto out;
-	}
-
-	*lqi = lqi_rssi[0];
-	*len = rx_len;
-
-#ifdef DEBUG
-	print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ",
-		       DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
-	pr_debug("mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n",
-		 lqi_rssi[0], lqi_rssi[1]);
-#endif
-
-out:
 	return ret;
 }
 
 static int mrf24j40_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
 {
 	struct mrf24j40 *devrec = hw->priv;
-	u8 val;
-	int ret = 0;
 
 	dev_dbg(printdev(devrec), "tx packet of %d bytes\n", skb->len);
+	devrec->tx_skb = skb;
 
-	ret = write_tx_buf(devrec, 0x000, skb->data, skb->len);
-	if (ret)
-		goto err;
-
-	reinit_completion(&devrec->tx_complete);
-
-	/* Set TXNTRIG bit of TXNCON to send packet */
-	ret = read_short_reg(devrec, REG_TXNCON, &val);
-	if (ret)
-		goto err;
-	val |= 0x1;
-	/* Set TXNACKREQ if the ACK bit is set in the packet. */
-	if (skb->data[0] & IEEE802154_FC_ACK_REQ)
-		val |= 0x4;
-	write_short_reg(devrec, REG_TXNCON, val);
-
-	/* Wait for the device to send the TX complete interrupt. */
-	ret = wait_for_completion_interruptible_timeout(
-						&devrec->tx_complete,
-						5 * HZ);
-	if (ret == -ERESTARTSYS)
-		goto err;
-	if (ret == 0) {
-		dev_warn(printdev(devrec), "Timeout waiting for TX interrupt\n");
-		ret = -ETIMEDOUT;
-		goto err;
-	}
-
-	/* Check for send error from the device. */
-	ret = read_short_reg(devrec, REG_TXSTAT, &val);
-	if (ret)
-		goto err;
-	if (val & 0x1) {
-		dev_dbg(printdev(devrec), "Error Sending. Retry count exceeded\n");
-		ret = -ECOMM; /* TODO: Better error code ? */
-	} else
-		dev_dbg(printdev(devrec), "Packet Sent\n");
-
-err:
-
-	return ret;
+	return write_tx_buf(devrec, 0x000, skb->data, skb->len);
 }
 
 static int mrf24j40_ed(struct ieee802154_hw *hw, u8 *level)
@@ -394,33 +610,23 @@
 static int mrf24j40_start(struct ieee802154_hw *hw)
 {
 	struct mrf24j40 *devrec = hw->priv;
-	u8 val;
-	int ret;
 
 	dev_dbg(printdev(devrec), "start\n");
 
-	ret = read_short_reg(devrec, REG_INTCON, &val);
-	if (ret)
-		return ret;
-	val &= ~(0x1|0x8); /* Clear TXNIE and RXIE. Enable interrupts */
-	write_short_reg(devrec, REG_INTCON, val);
-
-	return 0;
+	/* Clear TXNIE and RXIE. Enable interrupts */
+	return regmap_update_bits(devrec->regmap_short, REG_INTCON,
+				  BIT_TXNIE | BIT_RXIE, 0);
 }
 
 static void mrf24j40_stop(struct ieee802154_hw *hw)
 {
 	struct mrf24j40 *devrec = hw->priv;
-	u8 val;
-	int ret;
 
 	dev_dbg(printdev(devrec), "stop\n");
 
-	ret = read_short_reg(devrec, REG_INTCON, &val);
-	if (ret)
-		return;
-	val |= 0x1|0x8; /* Set TXNIE and RXIE. Disable Interrupts */
-	write_short_reg(devrec, REG_INTCON, val);
+	/* Set TXNIE and RXIE. Disable Interrupts */
+	regmap_update_bits(devrec->regmap_short, REG_INTCON,
+			   BIT_TXNIE | BIT_RXIE, BIT_TXNIE | BIT_RXIE);
 }
 
 static int mrf24j40_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
@@ -436,21 +642,23 @@
 	WARN_ON(channel > MRF24J40_CHAN_MAX);
 
 	/* Set Channel TODO */
-	val = (channel-11) << 4 | 0x03;
-	write_long_reg(devrec, REG_RFCON0, val);
-
-	/* RF Reset */
-	ret = read_short_reg(devrec, REG_RFCTL, &val);
+	val = (channel - 11) << RFCON0_CH_SHIFT | RFOPT_RECOMMEND;
+	ret = regmap_update_bits(devrec->regmap_long, REG_RFCON0,
+				 RFCON0_CH_MASK, val);
 	if (ret)
 		return ret;
-	val |= 0x04;
-	write_short_reg(devrec, REG_RFCTL, val);
-	val &= ~0x04;
-	write_short_reg(devrec, REG_RFCTL, val);
 
-	udelay(SET_CHANNEL_DELAY_US); /* per datasheet */
+	/* RF Reset */
+	ret = regmap_update_bits(devrec->regmap_short, REG_RFCTL, BIT_RFRST,
+				 BIT_RFRST);
+	if (ret)
+		return ret;
 
-	return 0;
+	ret = regmap_update_bits(devrec->regmap_short, REG_RFCTL, BIT_RFRST, 0);
+	if (!ret)
+		udelay(SET_CHANNEL_DELAY_US); /* per datasheet */
+
+	return ret;
 }
 
 static int mrf24j40_filter(struct ieee802154_hw *hw,
@@ -468,8 +676,8 @@
 		addrh = le16_to_cpu(filt->short_addr) >> 8 & 0xff;
 		addrl = le16_to_cpu(filt->short_addr) & 0xff;
 
-		write_short_reg(devrec, REG_SADRH, addrh);
-		write_short_reg(devrec, REG_SADRL, addrl);
+		regmap_write(devrec->regmap_short, REG_SADRH, addrh);
+		regmap_write(devrec->regmap_short, REG_SADRL, addrl);
 		dev_dbg(printdev(devrec),
 			"Set short addr to %04hx\n", filt->short_addr);
 	}
@@ -480,7 +688,8 @@
 
 		memcpy(addr, &filt->ieee_addr, 8);
 		for (i = 0; i < 8; i++)
-			write_short_reg(devrec, REG_EADR0 + i, addr[i]);
+			regmap_write(devrec->regmap_short, REG_EADR0 + i,
+				     addr[i]);
 
 #ifdef DEBUG
 		pr_debug("Set long addr to: ");
@@ -496,8 +705,8 @@
 
 		panidh = le16_to_cpu(filt->pan_id) >> 8 & 0xff;
 		panidl = le16_to_cpu(filt->pan_id) & 0xff;
-		write_short_reg(devrec, REG_PANIDH, panidh);
-		write_short_reg(devrec, REG_PANIDL, panidl);
+		regmap_write(devrec->regmap_short, REG_PANIDH, panidh);
+		regmap_write(devrec->regmap_short, REG_PANIDL, panidl);
 
 		dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id);
 	}
@@ -507,14 +716,14 @@
 		u8 val;
 		int ret;
 
-		ret = read_short_reg(devrec, REG_RXMCR, &val);
+		if (filt->pan_coord)
+			val = BIT_PANCOORD;
+		else
+			val = 0;
+		ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR,
+					 BIT_PANCOORD, val);
 		if (ret)
 			return ret;
-		if (filt->pan_coord)
-			val |= 0x8;
-		else
-			val &= ~0x8;
-		write_short_reg(devrec, REG_RXMCR, val);
 
 		/* REG_SLOTTED is maintained as default (unslotted/CSMA-CA).
 		 * REG_ORDER is maintained as default (no beacon/superframe).
@@ -527,168 +736,392 @@
 	return 0;
 }
 
-static int mrf24j40_handle_rx(struct mrf24j40 *devrec)
+static void mrf24j40_handle_rx_read_buf_unlock(struct mrf24j40 *devrec)
 {
-	u8 len = RX_FIFO_SIZE;
-	u8 lqi = 0;
-	u8 val;
-	int ret = 0;
-	int ret2;
+	int ret;
+
+	/* Turn back on reception of packets off the air. */
+	devrec->rx_msg.complete = NULL;
+	devrec->rx_buf[0] = MRF24J40_WRITESHORT(REG_BBREG1);
+	devrec->rx_buf[1] = 0x00; /* CLR RXDECINV */
+	ret = spi_async(devrec->spi, &devrec->rx_msg);
+	if (ret)
+		dev_err(printdev(devrec), "failed to unlock rx buffer\n");
+}
+
+static void mrf24j40_handle_rx_read_buf_complete(void *context)
+{
+	struct mrf24j40 *devrec = context;
+	u8 len = devrec->rx_buf[2];
+	u8 rx_local_buf[RX_FIFO_SIZE];
 	struct sk_buff *skb;
 
-	/* Turn off reception of packets off the air. This prevents the
-	 * device from overwriting the buffer while we're reading it. */
-	ret = read_short_reg(devrec, REG_BBREG1, &val);
-	if (ret)
-		goto out;
-	val |= 4; /* SET RXDECINV */
-	write_short_reg(devrec, REG_BBREG1, val);
+	memcpy(rx_local_buf, devrec->rx_fifo_buf, len);
+	mrf24j40_handle_rx_read_buf_unlock(devrec);
 
-	skb = dev_alloc_skb(len);
+	skb = dev_alloc_skb(IEEE802154_MTU);
 	if (!skb) {
-		ret = -ENOMEM;
-		goto out;
+		dev_err(printdev(devrec), "failed to allocate skb\n");
+		return;
 	}
 
-	ret = mrf24j40_read_rx_buf(devrec, skb_put(skb, len), &len, &lqi);
-	if (ret < 0) {
-		dev_err(printdev(devrec), "Failure reading RX FIFO\n");
-		kfree_skb(skb);
-		ret = -EINVAL;
-		goto out;
+	memcpy(skb_put(skb, len), rx_local_buf, len);
+	ieee802154_rx_irqsafe(devrec->hw, skb, 0);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ", DUMP_PREFIX_OFFSET, 16, 1,
+		       rx_local_buf, len, 0);
+	pr_debug("mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n",
+		 devrec->rx_lqi_buf[0], devrec->rx_lqi_buf[1]);
+#endif
+}
+
+static void mrf24j40_handle_rx_read_buf(void *context)
+{
+	struct mrf24j40 *devrec = context;
+	u16 cmd;
+	int ret;
+
+	/* if length is invalid read the full MTU */
+	if (!ieee802154_is_valid_psdu_len(devrec->rx_buf[2]))
+		devrec->rx_buf[2] = IEEE802154_MTU;
+
+	cmd = MRF24J40_READLONG(REG_RX_FIFO + 1);
+	devrec->rx_addr_buf[0] = cmd >> 8 & 0xff;
+	devrec->rx_addr_buf[1] = cmd & 0xff;
+	devrec->rx_fifo_buf_trx.len = devrec->rx_buf[2];
+	ret = spi_async(devrec->spi, &devrec->rx_buf_msg);
+	if (ret) {
+		dev_err(printdev(devrec), "failed to read rx buffer\n");
+		mrf24j40_handle_rx_read_buf_unlock(devrec);
+	}
+}
+
+static void mrf24j40_handle_rx_read_len(void *context)
+{
+	struct mrf24j40 *devrec = context;
+	u16 cmd;
+	int ret;
+
+	/* read the length of received frame */
+	devrec->rx_msg.complete = mrf24j40_handle_rx_read_buf;
+	devrec->rx_trx.len = 3;
+	cmd = MRF24J40_READLONG(REG_RX_FIFO);
+	devrec->rx_buf[0] = cmd >> 8 & 0xff;
+	devrec->rx_buf[1] = cmd & 0xff;
+
+	ret = spi_async(devrec->spi, &devrec->rx_msg);
+	if (ret) {
+		dev_err(printdev(devrec), "failed to read rx buffer length\n");
+		mrf24j40_handle_rx_read_buf_unlock(devrec);
+	}
+}
+
+static int mrf24j40_handle_rx(struct mrf24j40 *devrec)
+{
+	/* Turn off reception of packets off the air. This prevents the
+	 * device from overwriting the buffer while we're reading it.
+	 */
+	devrec->rx_msg.complete = mrf24j40_handle_rx_read_len;
+	devrec->rx_trx.len = 2;
+	devrec->rx_buf[0] = MRF24J40_WRITESHORT(REG_BBREG1);
+	devrec->rx_buf[1] = BIT_RXDECINV; /* SET RXDECINV */
+
+	return spi_async(devrec->spi, &devrec->rx_msg);
+}
+
+static int
+mrf24j40_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be,
+		     u8 retries)
+{
+	struct mrf24j40 *devrec = hw->priv;
+	u8 val;
+
+	/* min_be */
+	val = min_be << TXMCR_MIN_BE_SHIFT;
+	/* csma backoffs */
+	val |= retries << TXMCR_CSMA_RETRIES_SHIFT;
+
+	return regmap_update_bits(devrec->regmap_short, REG_TXMCR,
+				  TXMCR_MIN_BE_MASK | TXMCR_CSMA_RETRIES_MASK,
+				  val);
+}
+
+static int mrf24j40_set_cca_mode(struct ieee802154_hw *hw,
+				 const struct wpan_phy_cca *cca)
+{
+	struct mrf24j40 *devrec = hw->priv;
+	u8 val;
+
+	/* mapping 802.15.4 to driver spec */
+	switch (cca->mode) {
+	case NL802154_CCA_ENERGY:
+		val = 2;
+		break;
+	case NL802154_CCA_CARRIER:
+		val = 1;
+		break;
+	case NL802154_CCA_ENERGY_CARRIER:
+		switch (cca->opt) {
+		case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
+			val = 3;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
 	}
 
-	/* Cut off the checksum */
-	skb_trim(skb, len-2);
+	return regmap_update_bits(devrec->regmap_short, REG_BBREG2,
+				  BBREG2_CCA_MODE_MASK,
+				  val << BBREG2_CCA_MODE_SHIFT);
+}
 
-	/* TODO: Other drivers call ieee20154_rx_irqsafe() here (eg: cc2040,
-	 * also from a workqueue).  I think irqsafe is not necessary here.
-	 * Can someone confirm? */
-	ieee802154_rx_irqsafe(devrec->hw, skb, lqi);
+/* array for representing ed levels */
+static const s32 mrf24j40_ed_levels[] = {
+	-9000, -8900, -8800, -8700, -8600, -8500, -8400, -8300, -8200, -8100,
+	-8000, -7900, -7800, -7700, -7600, -7500, -7400, -7300, -7200, -7100,
+	-7000, -6900, -6800, -6700, -6600, -6500, -6400, -6300, -6200, -6100,
+	-6000, -5900, -5800, -5700, -5600, -5500, -5400, -5300, -5200, -5100,
+	-5000, -4900, -4800, -4700, -4600, -4500, -4400, -4300, -4200, -4100,
+	-4000, -3900, -3800, -3700, -3600, -3500
+};
 
-	dev_dbg(printdev(devrec), "RX Handled\n");
+/* map ed levels to register value */
+static const s32 mrf24j40_ed_levels_map[][2] = {
+	{ -9000, 0 }, { -8900, 1 }, { -8800, 2 }, { -8700, 5 }, { -8600, 9 },
+	{ -8500, 13 }, { -8400, 18 }, { -8300, 23 }, { -8200, 27 },
+	{ -8100, 32 }, { -8000, 37 }, { -7900, 43 }, { -7800, 48 },
+	{ -7700, 53 }, { -7600, 58 }, { -7500, 63 }, { -7400, 68 },
+	{ -7300, 73 }, { -7200, 78 }, { -7100, 83 }, { -7000, 89 },
+	{ -6900, 95 }, { -6800, 100 }, { -6700, 107 }, { -6600, 111 },
+	{ -6500, 117 }, { -6400, 121 }, { -6300, 125 }, { -6200, 129 },
+	{ -6100, 133 },	{ -6000, 138 }, { -5900, 143 }, { -5800, 148 },
+	{ -5700, 153 }, { -5600, 159 },	{ -5500, 165 }, { -5400, 170 },
+	{ -5300, 176 }, { -5200, 183 }, { -5100, 188 }, { -5000, 193 },
+	{ -4900, 198 }, { -4800, 203 }, { -4700, 207 }, { -4600, 212 },
+	{ -4500, 216 }, { -4400, 221 }, { -4300, 225 }, { -4200, 228 },
+	{ -4100, 233 }, { -4000, 239 }, { -3900, 245 }, { -3800, 250 },
+	{ -3700, 253 }, { -3600, 254 }, { -3500, 255 },
+};
 
-out:
-	/* Turn back on reception of packets off the air. */
-	ret2 = read_short_reg(devrec, REG_BBREG1, &val);
-	if (ret2)
-		return ret2;
-	val &= ~0x4; /* Clear RXDECINV */
-	write_short_reg(devrec, REG_BBREG1, val);
+static int mrf24j40_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
+{
+	struct mrf24j40 *devrec = hw->priv;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mrf24j40_ed_levels_map); i++) {
+		if (mrf24j40_ed_levels_map[i][0] == mbm)
+			return regmap_write(devrec->regmap_short, REG_CCAEDTH,
+					    mrf24j40_ed_levels_map[i][1]);
+	}
+
+	return -EINVAL;
+}
+
+static const s32 mrf24j40ma_powers[] = {
+	0, -50, -120, -190, -280, -370, -490, -630, -1000, -1050, -1120, -1190,
+	-1280, -1370, -1490, -1630, -2000, -2050, -2120, -2190, -2280, -2370,
+	-2490, -2630, -3000, -3050, -3120, -3190, -3280, -3370, -3490, -3630,
+};
+
+static int mrf24j40_set_txpower(struct ieee802154_hw *hw, s32 mbm)
+{
+	struct mrf24j40 *devrec = hw->priv;
+	s32 small_scale;
+	u8 val;
+
+	if (0 >= mbm && mbm > -1000) {
+		val = TXPWRL_0 << TXPWRL_SHIFT;
+		small_scale = mbm;
+	} else if (-1000 >= mbm && mbm > -2000) {
+		val = TXPWRL_10 << TXPWRL_SHIFT;
+		small_scale = mbm + 1000;
+	} else if (-2000 >= mbm && mbm > -3000) {
+		val = TXPWRL_20 << TXPWRL_SHIFT;
+		small_scale = mbm + 2000;
+	} else if (-3000 >= mbm && mbm > -4000) {
+		val = TXPWRL_30 << TXPWRL_SHIFT;
+		small_scale = mbm + 3000;
+	} else {
+		return -EINVAL;
+	}
+
+	switch (small_scale) {
+	case 0:
+		val |= (TXPWRS_0 << TXPWRS_SHIFT);
+		break;
+	case -50:
+		val |= (TXPWRS_0_5 << TXPWRS_SHIFT);
+		break;
+	case -120:
+		val |= (TXPWRS_1_2 << TXPWRS_SHIFT);
+		break;
+	case -190:
+		val |= (TXPWRS_1_9 << TXPWRS_SHIFT);
+		break;
+	case -280:
+		val |= (TXPWRS_2_8 << TXPWRS_SHIFT);
+		break;
+	case -370:
+		val |= (TXPWRS_3_7 << TXPWRS_SHIFT);
+		break;
+	case -490:
+		val |= (TXPWRS_4_9 << TXPWRS_SHIFT);
+		break;
+	case -630:
+		val |= (TXPWRS_6_3 << TXPWRS_SHIFT);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return regmap_update_bits(devrec->regmap_long, REG_RFCON3,
+				  TXPWRL_MASK | TXPWRS_MASK, val);
+}
+
+static int mrf24j40_set_promiscuous_mode(struct ieee802154_hw *hw, bool on)
+{
+	struct mrf24j40 *devrec = hw->priv;
+	int ret;
+
+	if (on) {
+		/* set PROMI, ERRPKT and NOACKRSP */
+		ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR,
+					 BIT_PROMI | BIT_ERRPKT | BIT_NOACKRSP,
+					 BIT_PROMI | BIT_ERRPKT | BIT_NOACKRSP);
+	} else {
+		/* clear PROMI, ERRPKT and NOACKRSP */
+		ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR,
+					 BIT_PROMI | BIT_ERRPKT | BIT_NOACKRSP,
+					 0);
+	}
 
 	return ret;
 }
 
 static const struct ieee802154_ops mrf24j40_ops = {
 	.owner = THIS_MODULE,
-	.xmit_sync = mrf24j40_tx,
+	.xmit_async = mrf24j40_tx,
 	.ed = mrf24j40_ed,
 	.start = mrf24j40_start,
 	.stop = mrf24j40_stop,
 	.set_channel = mrf24j40_set_channel,
 	.set_hw_addr_filt = mrf24j40_filter,
+	.set_csma_params = mrf24j40_csma_params,
+	.set_cca_mode = mrf24j40_set_cca_mode,
+	.set_cca_ed_level = mrf24j40_set_cca_ed_level,
+	.set_txpower = mrf24j40_set_txpower,
+	.set_promiscuous_mode = mrf24j40_set_promiscuous_mode,
 };
 
+static void mrf24j40_intstat_complete(void *context)
+{
+	struct mrf24j40 *devrec = context;
+	u8 intstat = devrec->irq_buf[1];
+
+	enable_irq(devrec->spi->irq);
+
+	/* Check for TX complete */
+	if (intstat & BIT_TXNIF)
+		ieee802154_xmit_complete(devrec->hw, devrec->tx_skb, false);
+
+	/* Check for Rx */
+	if (intstat & BIT_RXIF)
+		mrf24j40_handle_rx(devrec);
+}
+
 static irqreturn_t mrf24j40_isr(int irq, void *data)
 {
 	struct mrf24j40 *devrec = data;
-	u8 intstat;
 	int ret;
 
+	disable_irq_nosync(irq);
+
+	devrec->irq_buf[0] = MRF24J40_READSHORT(REG_INTSTAT);
 	/* Read the interrupt status */
-	ret = read_short_reg(devrec, REG_INTSTAT, &intstat);
-	if (ret)
-		goto out;
+	ret = spi_async(devrec->spi, &devrec->irq_msg);
+	if (ret) {
+		enable_irq(irq);
+		return IRQ_NONE;
+	}
 
-	/* Check for TX complete */
-	if (intstat & 0x1)
-		complete(&devrec->tx_complete);
-
-	/* Check for Rx */
-	if (intstat & 0x8)
-		mrf24j40_handle_rx(devrec);
-
-out:
 	return IRQ_HANDLED;
 }
 
 static int mrf24j40_hw_init(struct mrf24j40 *devrec)
 {
+	u32 irq_type;
 	int ret;
-	u8 val;
 
 	/* Initialize the device.
 		From datasheet section 3.2: Initialization. */
-	ret = write_short_reg(devrec, REG_SOFTRST, 0x07);
+	ret = regmap_write(devrec->regmap_short, REG_SOFTRST, 0x07);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_PACON2, 0x98);
+	ret = regmap_write(devrec->regmap_short, REG_PACON2, 0x98);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_TXSTBL, 0x95);
+	ret = regmap_write(devrec->regmap_short, REG_TXSTBL, 0x95);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_RFCON0, 0x03);
+	ret = regmap_write(devrec->regmap_long, REG_RFCON0, 0x03);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_RFCON1, 0x01);
+	ret = regmap_write(devrec->regmap_long, REG_RFCON1, 0x01);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_RFCON2, 0x80);
+	ret = regmap_write(devrec->regmap_long, REG_RFCON2, 0x80);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_RFCON6, 0x90);
+	ret = regmap_write(devrec->regmap_long, REG_RFCON6, 0x90);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_RFCON7, 0x80);
+	ret = regmap_write(devrec->regmap_long, REG_RFCON7, 0x80);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_RFCON8, 0x10);
+	ret = regmap_write(devrec->regmap_long, REG_RFCON8, 0x10);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_SLPCON1, 0x21);
+	ret = regmap_write(devrec->regmap_long, REG_SLPCON1, 0x21);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_BBREG2, 0x80);
+	ret = regmap_write(devrec->regmap_short, REG_BBREG2, 0x80);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_CCAEDTH, 0x60);
+	ret = regmap_write(devrec->regmap_short, REG_CCAEDTH, 0x60);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_BBREG6, 0x40);
+	ret = regmap_write(devrec->regmap_short, REG_BBREG6, 0x40);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_RFCTL, 0x04);
+	ret = regmap_write(devrec->regmap_short, REG_RFCTL, 0x04);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_RFCTL, 0x0);
+	ret = regmap_write(devrec->regmap_short, REG_RFCTL, 0x0);
 	if (ret)
 		goto err_ret;
 
 	udelay(192);
 
 	/* Set RX Mode. RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */
-	ret = read_short_reg(devrec, REG_RXMCR, &val);
-	if (ret)
-		goto err_ret;
-
-	val &= ~0x3; /* Clear RX mode (normal) */
-
-	ret = write_short_reg(devrec, REG_RXMCR, val);
+	ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR, 0x03, 0x00);
 	if (ret)
 		goto err_ret;
 
@@ -696,22 +1129,39 @@
 		/* Enable external amplifier.
 		 * From MRF24J40MC datasheet section 1.3: Operation.
 		 */
-		read_long_reg(devrec, REG_TESTMODE, &val);
-		val |= 0x7; /* Configure GPIO 0-2 to control amplifier */
-		write_long_reg(devrec, REG_TESTMODE, val);
+		regmap_update_bits(devrec->regmap_long, REG_TESTMODE, 0x07,
+				   0x07);
 
-		read_short_reg(devrec, REG_TRISGPIO, &val);
-		val |= 0x8; /* Set GPIO3 as output. */
-		write_short_reg(devrec, REG_TRISGPIO, val);
+		/* Set GPIO3 as output. */
+		regmap_update_bits(devrec->regmap_short, REG_TRISGPIO, 0x08,
+				   0x08);
 
-		read_short_reg(devrec, REG_GPIO, &val);
-		val |= 0x8; /* Set GPIO3 HIGH to enable U5 voltage regulator */
-		write_short_reg(devrec, REG_GPIO, val);
+		/* Set GPIO3 HIGH to enable U5 voltage regulator */
+		regmap_update_bits(devrec->regmap_short, REG_GPIO, 0x08, 0x08);
 
 		/* Reduce TX pwr to meet FCC requirements.
 		 * From MRF24J40MC datasheet section 3.1.1
 		 */
-		write_long_reg(devrec, REG_RFCON3, 0x28);
+		regmap_write(devrec->regmap_long, REG_RFCON3, 0x28);
+	}
+
+	irq_type = irq_get_trigger_type(devrec->spi->irq);
+	if (irq_type == IRQ_TYPE_EDGE_RISING ||
+	    irq_type == IRQ_TYPE_EDGE_FALLING)
+		dev_warn(&devrec->spi->dev,
+			 "Using edge triggered irq's are not recommended, because it can cause races and result in a non-functional driver!\n");
+	switch (irq_type) {
+	case IRQ_TYPE_EDGE_RISING:
+	case IRQ_TYPE_LEVEL_HIGH:
+		/* set interrupt polarity to rising */
+		ret = regmap_update_bits(devrec->regmap_long, REG_SLPCON0,
+					 BIT_INTEDGE, BIT_INTEDGE);
+		if (ret)
+			goto err_ret;
+		break;
+	default:
+		/* default is falling edge */
+		break;
 	}
 
 	return 0;
@@ -720,67 +1170,178 @@
 	return ret;
 }
 
+static void
+mrf24j40_setup_tx_spi_messages(struct mrf24j40 *devrec)
+{
+	spi_message_init(&devrec->tx_msg);
+	devrec->tx_msg.context = devrec;
+	devrec->tx_msg.complete = write_tx_buf_complete;
+	devrec->tx_hdr_trx.len = 2;
+	devrec->tx_hdr_trx.tx_buf = devrec->tx_hdr_buf;
+	spi_message_add_tail(&devrec->tx_hdr_trx, &devrec->tx_msg);
+	devrec->tx_len_trx.len = 2;
+	devrec->tx_len_trx.tx_buf = devrec->tx_len_buf;
+	spi_message_add_tail(&devrec->tx_len_trx, &devrec->tx_msg);
+	spi_message_add_tail(&devrec->tx_buf_trx, &devrec->tx_msg);
+
+	spi_message_init(&devrec->tx_post_msg);
+	devrec->tx_post_msg.context = devrec;
+	devrec->tx_post_trx.len = 2;
+	devrec->tx_post_trx.tx_buf = devrec->tx_post_buf;
+	spi_message_add_tail(&devrec->tx_post_trx, &devrec->tx_post_msg);
+}
+
+static void
+mrf24j40_setup_rx_spi_messages(struct mrf24j40 *devrec)
+{
+	spi_message_init(&devrec->rx_msg);
+	devrec->rx_msg.context = devrec;
+	devrec->rx_trx.len = 2;
+	devrec->rx_trx.tx_buf = devrec->rx_buf;
+	devrec->rx_trx.rx_buf = devrec->rx_buf;
+	spi_message_add_tail(&devrec->rx_trx, &devrec->rx_msg);
+
+	spi_message_init(&devrec->rx_buf_msg);
+	devrec->rx_buf_msg.context = devrec;
+	devrec->rx_buf_msg.complete = mrf24j40_handle_rx_read_buf_complete;
+	devrec->rx_addr_trx.len = 2;
+	devrec->rx_addr_trx.tx_buf = devrec->rx_addr_buf;
+	spi_message_add_tail(&devrec->rx_addr_trx, &devrec->rx_buf_msg);
+	devrec->rx_fifo_buf_trx.rx_buf = devrec->rx_fifo_buf;
+	spi_message_add_tail(&devrec->rx_fifo_buf_trx, &devrec->rx_buf_msg);
+	devrec->rx_lqi_trx.len = 2;
+	devrec->rx_lqi_trx.rx_buf = devrec->rx_lqi_buf;
+	spi_message_add_tail(&devrec->rx_lqi_trx, &devrec->rx_buf_msg);
+}
+
+static void
+mrf24j40_setup_irq_spi_messages(struct mrf24j40 *devrec)
+{
+	spi_message_init(&devrec->irq_msg);
+	devrec->irq_msg.context = devrec;
+	devrec->irq_msg.complete = mrf24j40_intstat_complete;
+	devrec->irq_trx.len = 2;
+	devrec->irq_trx.tx_buf = devrec->irq_buf;
+	devrec->irq_trx.rx_buf = devrec->irq_buf;
+	spi_message_add_tail(&devrec->irq_trx, &devrec->irq_msg);
+}
+
+static void mrf24j40_phy_setup(struct mrf24j40 *devrec)
+{
+	ieee802154_random_extended_addr(&devrec->hw->phy->perm_extended_addr);
+	devrec->hw->phy->current_channel = 11;
+
+	/* mrf24j40 supports max_minbe 0 - 3 */
+	devrec->hw->phy->supported.max_minbe = 3;
+	/* The datasheet doesn't say anything about max_be, but since min_be
+	 * is supported we assume the max_be default.
+	 */
+	devrec->hw->phy->supported.min_maxbe = 5;
+	devrec->hw->phy->supported.max_maxbe = 5;
+
+	devrec->hw->phy->cca.mode = NL802154_CCA_CARRIER;
+	devrec->hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+					       BIT(NL802154_CCA_CARRIER) |
+					       BIT(NL802154_CCA_ENERGY_CARRIER);
+	devrec->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND);
+
+	devrec->hw->phy->cca_ed_level = -6900;
+	devrec->hw->phy->supported.cca_ed_levels = mrf24j40_ed_levels;
+	devrec->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(mrf24j40_ed_levels);
+
+	switch (spi_get_device_id(devrec->spi)->driver_data) {
+	case MRF24J40:
+	case MRF24J40MA:
+		devrec->hw->phy->supported.tx_powers = mrf24j40ma_powers;
+		devrec->hw->phy->supported.tx_powers_size = ARRAY_SIZE(mrf24j40ma_powers);
+		devrec->hw->phy->flags |= WPAN_PHY_FLAG_TXPOWER;
+		break;
+	default:
+		break;
+	}
+}
+
 static int mrf24j40_probe(struct spi_device *spi)
 {
-	int ret = -ENOMEM;
+	int ret = -ENOMEM, irq_type;
+	struct ieee802154_hw *hw;
 	struct mrf24j40 *devrec;
 
 	dev_info(&spi->dev, "probe(). IRQ: %d\n", spi->irq);
 
-	devrec = devm_kzalloc(&spi->dev, sizeof(struct mrf24j40), GFP_KERNEL);
-	if (!devrec)
-		goto err_ret;
-	devrec->buf = devm_kzalloc(&spi->dev, 3, GFP_KERNEL);
-	if (!devrec->buf)
-		goto err_ret;
-
-	spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
-	if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
-		spi->max_speed_hz = MAX_SPI_SPEED_HZ;
-
-	mutex_init(&devrec->buffer_mutex);
-	init_completion(&devrec->tx_complete);
-	devrec->spi = spi;
-	spi_set_drvdata(spi, devrec);
-
 	/* Register with the 802154 subsystem */
 
-	devrec->hw = ieee802154_alloc_hw(0, &mrf24j40_ops);
-	if (!devrec->hw)
+	hw = ieee802154_alloc_hw(sizeof(*devrec), &mrf24j40_ops);
+	if (!hw)
 		goto err_ret;
 
-	devrec->hw->priv = devrec;
-	devrec->hw->parent = &devrec->spi->dev;
+	devrec = hw->priv;
+	devrec->spi = spi;
+	spi_set_drvdata(spi, devrec);
+	devrec->hw = hw;
+	devrec->hw->parent = &spi->dev;
 	devrec->hw->phy->supported.channels[0] = CHANNEL_MASK;
-	devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AFILT;
+	devrec->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
+			    IEEE802154_HW_CSMA_PARAMS |
+			    IEEE802154_HW_PROMISCUOUS;
+
+	devrec->hw->phy->flags = WPAN_PHY_FLAG_CCA_MODE |
+				 WPAN_PHY_FLAG_CCA_ED_LEVEL;
+
+	mrf24j40_setup_tx_spi_messages(devrec);
+	mrf24j40_setup_rx_spi_messages(devrec);
+	mrf24j40_setup_irq_spi_messages(devrec);
+
+	devrec->regmap_short = devm_regmap_init_spi(spi,
+						    &mrf24j40_short_regmap);
+	if (IS_ERR(devrec->regmap_short)) {
+		ret = PTR_ERR(devrec->regmap_short);
+		dev_err(&spi->dev, "Failed to allocate short register map: %d\n",
+			ret);
+		goto err_register_device;
+	}
+
+	devrec->regmap_long = devm_regmap_init(&spi->dev,
+					       &mrf24j40_long_regmap_bus,
+					       spi, &mrf24j40_long_regmap);
+	if (IS_ERR(devrec->regmap_long)) {
+		ret = PTR_ERR(devrec->regmap_long);
+		dev_err(&spi->dev, "Failed to allocate long register map: %d\n",
+			ret);
+		goto err_register_device;
+	}
+
+	if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) {
+		dev_warn(&spi->dev, "spi clock above possible maximum: %d",
+			 MAX_SPI_SPEED_HZ);
+		return -EINVAL;
+	}
+
+	ret = mrf24j40_hw_init(devrec);
+	if (ret)
+		goto err_register_device;
+
+	mrf24j40_phy_setup(devrec);
+
+	/* request IRQF_TRIGGER_LOW as fallback default */
+	irq_type = irq_get_trigger_type(spi->irq);
+	if (!irq_type)
+		irq_type = IRQF_TRIGGER_LOW;
+
+	ret = devm_request_irq(&spi->dev, spi->irq, mrf24j40_isr,
+			       irq_type, dev_name(&spi->dev), devrec);
+	if (ret) {
+		dev_err(printdev(devrec), "Unable to get IRQ");
+		goto err_register_device;
+	}
 
 	dev_dbg(printdev(devrec), "registered mrf24j40\n");
 	ret = ieee802154_register_hw(devrec->hw);
 	if (ret)
 		goto err_register_device;
 
-	ret = mrf24j40_hw_init(devrec);
-	if (ret)
-		goto err_hw_init;
-
-	ret = devm_request_threaded_irq(&spi->dev,
-					spi->irq,
-					NULL,
-					mrf24j40_isr,
-					IRQF_TRIGGER_LOW|IRQF_ONESHOT,
-					dev_name(&spi->dev),
-					devrec);
-
-	if (ret) {
-		dev_err(printdev(devrec), "Unable to get IRQ");
-		goto err_irq;
-	}
-
 	return 0;
 
-err_irq:
-err_hw_init:
-	ieee802154_unregister_hw(devrec->hw);
 err_register_device:
 	ieee802154_free_hw(devrec->hw);
 err_ret:
@@ -801,6 +1362,14 @@
 	return 0;
 }
 
+static const struct of_device_id mrf24j40_of_match[] = {
+	{ .compatible = "microchip,mrf24j40", .data = (void *)MRF24J40 },
+	{ .compatible = "microchip,mrf24j40ma", .data = (void *)MRF24J40MA },
+	{ .compatible = "microchip,mrf24j40mc", .data = (void *)MRF24J40MC },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, mrf24j40_of_match);
+
 static const struct spi_device_id mrf24j40_ids[] = {
 	{ "mrf24j40", MRF24J40 },
 	{ "mrf24j40ma", MRF24J40MA },
@@ -811,6 +1380,7 @@
 
 static struct spi_driver mrf24j40_driver = {
 	.driver = {
+		.of_match_table = of_match_ptr(mrf24j40_of_match),
 		.name = "mrf24j40",
 		.owner = THIS_MODULE,
 	},
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 207f62e..24f8dbc 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -344,6 +344,7 @@
 {
 	const struct iphdr *ip4h = ip_hdr(skb);
 	struct net_device *dev = skb->dev;
+	struct net *net = dev_net(dev);
 	struct rtable *rt;
 	int err, ret = NET_XMIT_DROP;
 	struct flowi4 fl4 = {
@@ -354,7 +355,7 @@
 		.saddr = ip4h->saddr,
 	};
 
-	rt = ip_route_output_flow(dev_net(dev), &fl4, NULL);
+	rt = ip_route_output_flow(net, &fl4, NULL);
 	if (IS_ERR(rt))
 		goto err;
 
@@ -364,7 +365,7 @@
 	}
 	skb_dst_drop(skb);
 	skb_dst_set(skb, &rt->dst);
-	err = ip_local_out(skb);
+	err = ip_local_out(net, skb->sk, skb);
 	if (unlikely(net_xmit_eval(err)))
 		dev->stats.tx_errors++;
 	else
@@ -381,6 +382,7 @@
 {
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	struct net_device *dev = skb->dev;
+	struct net *net = dev_net(dev);
 	struct dst_entry *dst;
 	int err, ret = NET_XMIT_DROP;
 	struct flowi6 fl6 = {
@@ -393,7 +395,7 @@
 		.flowi6_proto = ip6h->nexthdr,
 	};
 
-	dst = ip6_route_output(dev_net(dev), NULL, &fl6);
+	dst = ip6_route_output(net, NULL, &fl6);
 	if (dst->error) {
 		ret = dst->error;
 		dst_release(dst);
@@ -401,7 +403,7 @@
 	}
 	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
-	err = ip6_local_out(skb);
+	err = ip6_local_out(net, skb->sk, skb);
 	if (unlikely(net_xmit_eval(err)))
 		dev->stats.tx_errors++;
 	else
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 58ae11a..64bb44d 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1031,7 +1031,6 @@
 static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
 {
 	struct ali_ircc_cb *self = priv;
-	unsigned long flags;
 	int iobase; 
 	int fcr;    /* FIFO control reg */
 	int lcr;    /* Line control reg */
@@ -1061,8 +1060,6 @@
 	/* Update accounting for new speed */
 	self->io.speed = speed;
 
-	spin_lock_irqsave(&self->lock, flags);
-
 	divisor = 115200/speed;
 	
 	fcr = UART_FCR_ENABLE_FIFO;
@@ -1089,9 +1086,6 @@
 	/* without this, the connection will be broken after come back from FIR speed,
 	   but with this, the SIR connection is harder to established */
 	outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
-	
-	spin_unlock_irqrestore(&self->lock, flags);
-	
 }
 
 static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 1004546..6e8f616 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -19,6 +19,9 @@
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
 #include <linux/gpio.h>
 #include <linux/slab.h>
 
@@ -27,18 +30,17 @@
 #include <net/irda/wrapper.h>
 #include <net/irda/irda_device.h>
 
-#include <mach/dma.h>
 #include <linux/platform_data/irda-pxaficp.h>
-#include <mach/regs-ost.h>
+#undef __REG
+#define __REG(x) ((x) & 0xffff)
 #include <mach/regs-uart.h>
 
-#define FICP		__REG(0x40800000)  /* Start of FICP area */
-#define ICCR0		__REG(0x40800000)  /* ICP Control Register 0 */
-#define ICCR1		__REG(0x40800004)  /* ICP Control Register 1 */
-#define ICCR2		__REG(0x40800008)  /* ICP Control Register 2 */
-#define ICDR		__REG(0x4080000c)  /* ICP Data Register */
-#define ICSR0		__REG(0x40800014)  /* ICP Status Register 0 */
-#define ICSR1		__REG(0x40800018)  /* ICP Status Register 1 */
+#define ICCR0		0x0000		/* ICP Control Register 0 */
+#define ICCR1		0x0004		/* ICP Control Register 1 */
+#define ICCR2		0x0008		/* ICP Control Register 2 */
+#define ICDR		0x000c		/* ICP Data Register */
+#define ICSR0		0x0014		/* ICP Status Register 0 */
+#define ICSR1		0x0018		/* ICP Status Register 1 */
 
 #define ICCR0_AME	(1 << 7)	/* Address match enable */
 #define ICCR0_TIE	(1 << 6)	/* Transmit FIFO interrupt enable */
@@ -56,9 +58,7 @@
 #define ICCR2_TRIG_16   (1 << 0)	/*	>= 16 bytes */
 #define ICCR2_TRIG_32   (2 << 0)	/*	>= 32 bytes */
 
-#ifdef CONFIG_PXA27x
 #define ICSR0_EOC	(1 << 6)	/* DMA End of Descriptor Chain */
-#endif
 #define ICSR0_FRE	(1 << 5)	/* Framing error */
 #define ICSR0_RFS	(1 << 4)	/* Receive FIFO service request */
 #define ICSR0_TFS	(1 << 3)	/* Transnit FIFO service request */
@@ -99,18 +99,61 @@
                 IrSR_RCVEIR_UART_MODE | \
                 IrSR_XMITIR_IR_MODE)
 
+/* macros for registers read/write */
+#define ficp_writel(irda, val, off)					\
+	do {								\
+		dev_vdbg(irda->dev,					\
+			 "%s():%d ficp_writel(0x%x, %s)\n",		\
+			 __func__, __LINE__, (val), #off);		\
+		writel_relaxed((val), (irda)->irda_base + (off));	\
+	} while (0)
+
+#define ficp_readl(irda, off)						\
+	({								\
+		unsigned int _v;					\
+		_v = readl_relaxed((irda)->irda_base + (off));		\
+		dev_vdbg(irda->dev,					\
+			 "%s():%d ficp_readl(%s): 0x%x\n",		\
+			 __func__, __LINE__, #off, _v);			\
+		_v;							\
+	})
+
+#define stuart_writel(irda, val, off)					\
+	do {								\
+		dev_vdbg(irda->dev,					\
+			 "%s():%d stuart_writel(0x%x, %s)\n",		\
+			 __func__, __LINE__, (val), #off);		\
+		writel_relaxed((val), (irda)->stuart_base + (off));	\
+	} while (0)
+
+#define stuart_readl(irda, off)						\
+	({								\
+		unsigned int _v;					\
+		_v = readl_relaxed((irda)->stuart_base + (off));	\
+		dev_vdbg(irda->dev,					\
+			 "%s():%d stuart_readl(%s): 0x%x\n",		\
+			 __func__, __LINE__, #off, _v);			\
+		_v;							\
+	})
+
 struct pxa_irda {
 	int			speed;
 	int			newspeed;
-	unsigned long		last_oscr;
+	unsigned long long	last_clk;
 
+	void __iomem		*stuart_base;
+	void __iomem		*irda_base;
 	unsigned char		*dma_rx_buff;
 	unsigned char		*dma_tx_buff;
 	dma_addr_t		dma_rx_buff_phy;
 	dma_addr_t		dma_tx_buff_phy;
 	unsigned int		dma_tx_buff_len;
-	int			txdma;
-	int			rxdma;
+	struct dma_chan		*txdma;
+	struct dma_chan		*rxdma;
+	dma_cookie_t		rx_cookie;
+	dma_cookie_t		tx_cookie;
+	int			drcmr_rx;
+	int			drcmr_tx;
 
 	int			uart_irq;
 	int			icp_irq;
@@ -128,6 +171,8 @@
 	struct clk		*cur_clk;
 };
 
+static int pxa_irda_set_speed(struct pxa_irda *si, int speed);
+
 static inline void pxa_irda_disable_clk(struct pxa_irda *si)
 {
 	if (si->cur_clk)
@@ -151,22 +196,41 @@
 #define IS_FIR(si)		((si)->speed >= 4000000)
 #define IRDA_FRAME_SIZE_LIMIT	2047
 
+static void pxa_irda_fir_dma_rx_irq(void *data);
+static void pxa_irda_fir_dma_tx_irq(void *data);
+
 inline static void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
 {
-	DCSR(si->rxdma)  = DCSR_NODESC;
-	DSADR(si->rxdma) = __PREG(ICDR);
-	DTADR(si->rxdma) = si->dma_rx_buff_phy;
-	DCMD(si->rxdma) = DCMD_INCTRGADDR | DCMD_FLOWSRC |  DCMD_WIDTH1 | DCMD_BURST32 | IRDA_FRAME_SIZE_LIMIT;
-	DCSR(si->rxdma) |= DCSR_RUN;
+	struct dma_async_tx_descriptor *tx;
+
+	tx = dmaengine_prep_slave_single(si->rxdma, si->dma_rx_buff_phy,
+					 IRDA_FRAME_SIZE_LIMIT, DMA_FROM_DEVICE,
+					 DMA_PREP_INTERRUPT);
+	if (!tx) {
+		dev_err(si->dev, "prep_slave_sg() failed\n");
+		return;
+	}
+	tx->callback = pxa_irda_fir_dma_rx_irq;
+	tx->callback_param = si;
+	si->rx_cookie = dmaengine_submit(tx);
+	dma_async_issue_pending(si->rxdma);
 }
 
 inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
 {
-	DCSR(si->txdma)  = DCSR_NODESC;
-	DSADR(si->txdma) = si->dma_tx_buff_phy;
-	DTADR(si->txdma) = __PREG(ICDR);
-	DCMD(si->txdma) = DCMD_INCSRCADDR | DCMD_FLOWTRG |  DCMD_ENDIRQEN | DCMD_WIDTH1 | DCMD_BURST32 | si->dma_tx_buff_len;
-	DCSR(si->txdma) |= DCSR_RUN;
+	struct dma_async_tx_descriptor *tx;
+
+	tx = dmaengine_prep_slave_single(si->txdma, si->dma_tx_buff_phy,
+					 si->dma_tx_buff_len, DMA_TO_DEVICE,
+					 DMA_PREP_INTERRUPT);
+	if (!tx) {
+		dev_err(si->dev, "prep_slave_sg() failed\n");
+		return;
+	}
+	tx->callback = pxa_irda_fir_dma_tx_irq;
+	tx->callback_param = si;
+	si->tx_cookie = dmaengine_submit(tx);
+	dma_async_issue_pending(si->txdma);
 }
 
 /*
@@ -205,9 +269,9 @@
 
 		if (IS_FIR(si)) {
 			/* stop RX DMA */
-			DCSR(si->rxdma) &= ~DCSR_RUN;
+			dmaengine_terminate_all(si->rxdma);
 			/* disable FICP */
-			ICCR0 = 0;
+			ficp_writel(si, 0, ICCR0);
 			pxa_irda_disable_clk(si);
 
 			/* set board transceiver to SIR mode */
@@ -218,17 +282,19 @@
 		}
 
 		/* disable STUART first */
-		STIER = 0;
+		stuart_writel(si, 0, STIER);
 
 		/* access DLL & DLH */
-		STLCR |= LCR_DLAB;
-		STDLL = divisor & 0xff;
-		STDLH = divisor >> 8;
-		STLCR &= ~LCR_DLAB;
+		stuart_writel(si, stuart_readl(si, STLCR) | LCR_DLAB, STLCR);
+		stuart_writel(si, divisor & 0xff, STDLL);
+		stuart_writel(si, divisor >> 8, STDLH);
+		stuart_writel(si, stuart_readl(si, STLCR) & ~LCR_DLAB, STLCR);
 
 		si->speed = speed;
-		STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
-		STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
+		stuart_writel(si, IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6,
+			      STISR);
+		stuart_writel(si, IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE,
+			      STIER);
 
 		local_irq_restore(flags);
 		break;
@@ -237,12 +303,12 @@
 		local_irq_save(flags);
 
 		/* disable STUART */
-		STIER = 0;
-		STISR = 0;
+		stuart_writel(si, 0, STIER);
+		stuart_writel(si, 0, STISR);
 		pxa_irda_disable_clk(si);
 
 		/* disable FICP first */
-		ICCR0 = 0;
+		ficp_writel(si, 0, ICCR0);
 
 		/* set board transceiver to FIR mode */
 		pxa_irda_set_mode(si, IR_FIRMODE);
@@ -252,7 +318,7 @@
 
 		si->speed = speed;
 		pxa_irda_fir_dma_rx_start(si);
-		ICCR0 = ICCR0_ITR | ICCR0_RXE;
+		ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);
 
 		local_irq_restore(flags);
 		break;
@@ -271,13 +337,13 @@
 	struct pxa_irda *si = netdev_priv(dev);
 	int iir, lsr, data;
 
-	iir = STIIR;
+	iir = stuart_readl(si, STIIR);
 
 	switch  (iir & 0x0F) {
 	case 0x06: /* Receiver Line Status */
-	  	lsr = STLSR;
+		lsr = stuart_readl(si, STLSR);
 		while (lsr & LSR_FIFOE) {
-			data = STRBR;
+			data = stuart_readl(si, STRBR);
 			if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
 				printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
 				dev->stats.rx_errors++;
@@ -290,9 +356,9 @@
 				async_unwrap_char(dev, &dev->stats,
 						  &si->rx_buff, data);
 			}
-			lsr = STLSR;
+			lsr = stuart_readl(si, STLSR);
 		}
-		si->last_oscr = readl_relaxed(OSCR);
+		si->last_clk = sched_clock();
 		break;
 
 	case 0x04: /* Received Data Available */
@@ -301,14 +367,16 @@
 	case 0x0C: /* Character Timeout Indication */
 	  	do  {
 		    dev->stats.rx_bytes++;
-	            async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR);
-	  	} while (STLSR & LSR_DR);
-		si->last_oscr = readl_relaxed(OSCR);
+		    async_unwrap_char(dev, &dev->stats, &si->rx_buff,
+				      stuart_readl(si, STRBR));
+		} while (stuart_readl(si, STLSR) & LSR_DR);
+		si->last_clk = sched_clock();
 	  	break;
 
 	case 0x02: /* Transmit FIFO Data Request */
-	    	while ((si->tx_buff.len) && (STLSR & LSR_TDRQ)) {
-	    		STTHR = *si->tx_buff.data++;
+		while ((si->tx_buff.len) &&
+		       (stuart_readl(si, STLSR) & LSR_TDRQ)) {
+			stuart_writel(si, *si->tx_buff.data++, STTHR);
 			si->tx_buff.len -= 1;
 	    	}
 
@@ -317,9 +385,9 @@
 			dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;
 
                         /* We need to ensure that the transmitter has finished. */
-			while ((STLSR & LSR_TEMT) == 0)
+			while ((stuart_readl(si, STLSR) & LSR_TEMT) == 0)
 				cpu_relax();
-			si->last_oscr = readl_relaxed(OSCR);
+			si->last_clk = sched_clock();
 
 			/*
 		 	* Ok, we've finished transmitting.  Now enable
@@ -331,9 +399,11 @@
 				si->newspeed = 0;
 			} else {
 				/* enable IR Receiver, disable IR Transmitter */
-				STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
+				stuart_writel(si, IrSR_IR_RECEIVE_ON |
+					      IrSR_XMODE_PULSE_1_6, STISR);
 				/* enable STUART and receive interrupts */
-				STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
+				stuart_writel(si, IER_UUE | IER_RLSE |
+					      IER_RAVIE | IER_RTIOE, STIER);
 			}
 			/* I'm hungry! */
 			netif_wake_queue(dev);
@@ -345,35 +415,32 @@
 }
 
 /* FIR Receive DMA interrupt handler */
-static void pxa_irda_fir_dma_rx_irq(int channel, void *data)
-{
-	int dcsr = DCSR(channel);
-
-	DCSR(channel) = dcsr & ~DCSR_RUN;
-
-	printk(KERN_DEBUG "pxa_ir: fir rx dma bus error %#x\n", dcsr);
-}
-
-/* FIR Transmit DMA interrupt handler */
-static void pxa_irda_fir_dma_tx_irq(int channel, void *data)
+static void pxa_irda_fir_dma_rx_irq(void *data)
 {
 	struct net_device *dev = data;
 	struct pxa_irda *si = netdev_priv(dev);
-	int dcsr;
 
-	dcsr = DCSR(channel);
-	DCSR(channel) = dcsr & ~DCSR_RUN;
+	dmaengine_terminate_all(si->rxdma);
+	netdev_dbg(dev, "pxa_ir: fir rx dma bus error\n");
+}
 
-	if (dcsr & DCSR_ENDINTR)  {
+/* FIR Transmit DMA interrupt handler */
+static void pxa_irda_fir_dma_tx_irq(void *data)
+{
+	struct net_device *dev = data;
+	struct pxa_irda *si = netdev_priv(dev);
+
+	dmaengine_terminate_all(si->txdma);
+	if (dmaengine_tx_status(si->txdma, si->tx_cookie, NULL) == DMA_ERROR) {
+		dev->stats.tx_errors++;
+	} else {
 		dev->stats.tx_packets++;
 		dev->stats.tx_bytes += si->dma_tx_buff_len;
-	} else {
-		dev->stats.tx_errors++;
 	}
 
-	while (ICSR1 & ICSR1_TBY)
+	while (ficp_readl(si, ICSR1) & ICSR1_TBY)
 		cpu_relax();
-	si->last_oscr = readl_relaxed(OSCR);
+	si->last_clk = sched_clock();
 
 	/*
 	 * HACK: It looks like the TBY bit is dropped too soon.
@@ -387,11 +454,11 @@
 	} else {
 		int i = 64;
 
-		ICCR0 = 0;
+		ficp_writel(si, 0, ICCR0);
 		pxa_irda_fir_dma_rx_start(si);
-		while ((ICSR1 & ICSR1_RNE) && i--)
-			(void)ICDR;
-		ICCR0 = ICCR0_ITR | ICCR0_RXE;
+		while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
+			ficp_readl(si, ICDR);
+		ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);
 
 		if (i < 0)
 			printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
@@ -403,15 +470,18 @@
 static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
 {
 	unsigned int len, stat, data;
+	struct dma_tx_state state;
 
 	/* Get the current data position. */
-	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;
+
+	dmaengine_tx_status(si->rxdma, si->rx_cookie, &state);
+	len = IRDA_FRAME_SIZE_LIMIT - state.residue;
 
 	do {
 		/* Read Status, and then Data. 	 */
-		stat = ICSR1;
+		stat = ficp_readl(si, ICSR1);
 		rmb();
-		data = ICDR;
+		data = ficp_readl(si, ICDR);
 
 		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
 			dev->stats.rx_errors++;
@@ -429,7 +499,7 @@
 		/* If we hit the end of frame, there's no point in continuing. */
 		if (stat & ICSR1_EOF)
 			break;
-	} while (ICSR0 & ICSR0_EIF);
+	} while (ficp_readl(si, ICSR0) & ICSR0_EIF);
 
 	if (stat & ICSR1_EOF) {
 		/* end of frame. */
@@ -472,9 +542,9 @@
 	int icsr0, i = 64;
 
 	/* stop RX DMA */
-	DCSR(si->rxdma) &= ~DCSR_RUN;
-	si->last_oscr = readl_relaxed(OSCR);
-	icsr0 = ICSR0;
+	dmaengine_terminate_all(si->rxdma);
+	si->last_clk = sched_clock();
+	icsr0 = ficp_readl(si, ICSR0);
 
 	if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
 		if (icsr0 & ICSR0_FRE) {
@@ -484,7 +554,7 @@
 			printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
 			dev->stats.rx_errors++;
 		}
-		ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB);
+		ficp_writel(si, icsr0 & (ICSR0_FRE | ICSR0_RAB), ICSR0);
 	}
 
 	if (icsr0 & ICSR0_EIF) {
@@ -492,11 +562,11 @@
 		pxa_irda_fir_irq_eif(si, dev, icsr0);
 	}
 
-	ICCR0 = 0;
+	ficp_writel(si, 0, ICCR0);
 	pxa_irda_fir_dma_rx_start(si);
-	while ((ICSR1 & ICSR1_RNE) && i--)
-		(void)ICDR;
-	ICCR0 = ICCR0_ITR | ICCR0_RXE;
+	while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
+		ficp_readl(si, ICDR);
+	ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);
 
 	if (i < 0)
 		printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
@@ -537,11 +607,12 @@
 		si->tx_buff.len  = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);
 
 		/* Disable STUART interrupts and switch to transmit mode. */
-		STIER = 0;
-		STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6;
+		stuart_writel(si, 0, STIER);
+		stuart_writel(si, IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6,
+			      STISR);
 
 		/* enable STUART and transmit interrupts */
-		STIER = IER_UUE | IER_TIE;
+		stuart_writel(si, IER_UUE | IER_TIE, STIER);
 	} else {
 		unsigned long mtt = irda_get_mtt(skb);
 
@@ -549,15 +620,15 @@
 		skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);
 
 		if (mtt)
-			while ((unsigned)(readl_relaxed(OSCR) - si->last_oscr)/4 < mtt)
+			while ((sched_clock() - si->last_clk) / 1000 < mtt)
 				cpu_relax();
 
 		/* stop RX DMA,  disable FICP */
-		DCSR(si->rxdma) &= ~DCSR_RUN;
-		ICCR0 = 0;
+		dmaengine_terminate_all(si->rxdma);
+		ficp_writel(si, 0, ICCR0);
 
 		pxa_irda_fir_dma_tx_start(si);
-		ICCR0 = ICCR0_ITR | ICCR0_TXE;
+		ficp_writel(si, ICCR0_ITR | ICCR0_TXE, ICCR0);
 	}
 
 	dev_kfree_skb(skb);
@@ -613,22 +684,18 @@
 static void pxa_irda_startup(struct pxa_irda *si)
 {
 	/* Disable STUART interrupts */
-	STIER = 0;
+	stuart_writel(si, 0, STIER);
 	/* enable STUART interrupt to the processor */
-	STMCR = MCR_OUT2;
+	stuart_writel(si, MCR_OUT2, STMCR);
 	/* configure SIR frame format: StartBit - Data 7 ... Data 0 - Stop Bit */
-	STLCR = LCR_WLS0 | LCR_WLS1;
+	stuart_writel(si, LCR_WLS0 | LCR_WLS1, STLCR);
 	/* enable FIFO, we use FIFO to improve performance */
-	STFCR = FCR_TRFIFOE | FCR_ITL_32;
+	stuart_writel(si, FCR_TRFIFOE | FCR_ITL_32, STFCR);
 
 	/* disable FICP */
-	ICCR0 = 0;
+	ficp_writel(si, 0, ICCR0);
 	/* configure FICP ICCR2 */
-	ICCR2 = ICCR2_TXP | ICCR2_TRIG_32;
-
-	/* configure DMAC */
-	DRCMR(17) = si->rxdma | DRCMR_MAPVLD;
-	DRCMR(18) = si->txdma | DRCMR_MAPVLD;
+	ficp_writel(si, ICCR2_TXP | ICCR2_TRIG_32, ICCR2);
 
 	/* force SIR reinitialization */
 	si->speed = 4000000;
@@ -644,22 +711,19 @@
 	local_irq_save(flags);
 
 	/* disable STUART and interrupt */
-	STIER = 0;
+	stuart_writel(si, 0, STIER);
 	/* disable STUART SIR mode */
-	STISR = 0;
+	stuart_writel(si, 0, STISR);
 
 	/* disable DMA */
-	DCSR(si->txdma) &= ~DCSR_RUN;
-	DCSR(si->rxdma) &= ~DCSR_RUN;
+	dmaengine_terminate_all(si->rxdma);
+	dmaengine_terminate_all(si->txdma);
 	/* disable FICP */
-	ICCR0 = 0;
+	ficp_writel(si, 0, ICCR0);
 
 	/* disable the STUART or FICP clocks */
 	pxa_irda_disable_clk(si);
 
-	DRCMR(17) = 0;
-	DRCMR(18) = 0;
-
 	local_irq_restore(flags);
 
 	/* power off board transceiver */
@@ -671,6 +735,9 @@
 static int pxa_irda_start(struct net_device *dev)
 {
 	struct pxa_irda *si = netdev_priv(dev);
+	dma_cap_mask_t mask;
+	struct dma_slave_config	config;
+	struct pxad_param param;
 	int err;
 
 	si->speed = 9600;
@@ -690,14 +757,37 @@
 	disable_irq(si->icp_irq);
 
 	err = -EBUSY;
-	si->rxdma = pxa_request_dma("FICP_RX",DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev);
-	if (si->rxdma < 0)
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	param.prio = PXAD_PRIO_LOWEST;
+
+	memset(&config, 0, sizeof(config));
+	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	config.src_addr = (dma_addr_t)si->irda_base + ICDR;
+	config.dst_addr = (dma_addr_t)si->irda_base + ICDR;
+	config.src_maxburst = 32;
+	config.dst_maxburst = 32;
+
+	param.drcmr = si->drcmr_rx;
+	si->rxdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
+						     &param, &dev->dev, "rx");
+	if (!si->rxdma)
 		goto err_rx_dma;
 
-	si->txdma = pxa_request_dma("FICP_TX",DMA_PRIO_LOW, pxa_irda_fir_dma_tx_irq, dev);
-	if (si->txdma < 0)
+	param.drcmr = si->drcmr_tx;
+	si->txdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
+						     &param, &dev->dev, "tx");
+	if (!si->txdma)
 		goto err_tx_dma;
 
+	err = dmaengine_slave_config(si->rxdma, &config);
+	if (err)
+		goto err_dma_rx_buff;
+	err = dmaengine_slave_config(si->txdma, &config);
+	if (err)
+		goto err_dma_rx_buff;
+
 	err = -ENOMEM;
 	si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
 					     &si->dma_rx_buff_phy, GFP_KERNEL);
@@ -737,9 +827,9 @@
 err_dma_tx_buff:
 	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
 err_dma_rx_buff:
-	pxa_free_dma(si->txdma);
+	dma_release_channel(si->txdma);
 err_tx_dma:
-	pxa_free_dma(si->rxdma);
+	dma_release_channel(si->rxdma);
 err_rx_dma:
 	free_irq(si->icp_irq, dev);
 err_irq2:
@@ -766,8 +856,10 @@
 	free_irq(si->uart_irq, dev);
 	free_irq(si->icp_irq, dev);
 
-	pxa_free_dma(si->rxdma);
-	pxa_free_dma(si->txdma);
+	dmaengine_terminate_all(si->rxdma);
+	dmaengine_terminate_all(si->txdma);
+	dma_release_channel(si->rxdma);
+	dma_release_channel(si->txdma);
 
 	if (si->dma_rx_buff)
 		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
@@ -830,25 +922,33 @@
 static int pxa_irda_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
+	struct resource *res;
 	struct pxa_irda *si;
+	void __iomem *ficp, *stuart;
 	unsigned int baudrate_mask;
 	int err;
 
 	if (!pdev->dev.platform_data)
 		return -ENODEV;
 
-	err = request_mem_region(__PREG(STUART), 0x24, "IrDA") ? 0 : -EBUSY;
-	if (err)
-		goto err_mem_1;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ficp = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ficp)) {
+		dev_err(&pdev->dev, "resource ficp not defined\n");
+		return PTR_ERR(ficp);
+	}
 
-	err = request_mem_region(__PREG(FICP), 0x1c, "IrDA") ? 0 : -EBUSY;
-	if (err)
-		goto err_mem_2;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	stuart = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(stuart)) {
+		dev_err(&pdev->dev, "resource stuart not defined\n");
+		return PTR_ERR(stuart);
+	}
 
 	dev = alloc_irdadev(sizeof(struct pxa_irda));
 	if (!dev) {
 		err = -ENOMEM;
-		goto err_mem_3;
+		goto err_mem_1;
 	}
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
@@ -856,16 +956,25 @@
 	si->dev = &pdev->dev;
 	si->pdata = pdev->dev.platform_data;
 
+	si->irda_base = ficp;
+	si->stuart_base = stuart;
 	si->uart_irq = platform_get_irq(pdev, 0);
 	si->icp_irq = platform_get_irq(pdev, 1);
 
-	si->sir_clk = clk_get(&pdev->dev, "UARTCLK");
-	si->fir_clk = clk_get(&pdev->dev, "FICPCLK");
+	si->sir_clk = devm_clk_get(&pdev->dev, "UARTCLK");
+	si->fir_clk = devm_clk_get(&pdev->dev, "FICPCLK");
 	if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
 		err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk);
 		goto err_mem_4;
 	}
 
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (res)
+		si->drcmr_rx = res->start;
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+	if (res)
+		si->drcmr_tx = res->start;
+
 	/*
 	 * Initialise the SIR buffers
 	 */
@@ -925,15 +1034,7 @@
 err_mem_5:
 		kfree(si->rx_buff.head);
 err_mem_4:
-		if (si->sir_clk && !IS_ERR(si->sir_clk))
-			clk_put(si->sir_clk);
-		if (si->fir_clk && !IS_ERR(si->fir_clk))
-			clk_put(si->fir_clk);
 		free_netdev(dev);
-err_mem_3:
-		release_mem_region(__PREG(FICP), 0x1c);
-err_mem_2:
-		release_mem_region(__PREG(STUART), 0x24);
 	}
 err_mem_1:
 	return err;
@@ -952,14 +1053,9 @@
 			si->pdata->shutdown(si->dev);
 		kfree(si->tx_buff.head);
 		kfree(si->rx_buff.head);
-		clk_put(si->fir_clk);
-		clk_put(si->sir_clk);
 		free_netdev(dev);
 	}
 
-	release_mem_region(__PREG(STUART), 0x24);
-	release_mem_region(__PREG(FICP), 0x1c);
-
 	return 0;
 }
 
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 47da435..86f6c62 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -412,7 +412,7 @@
 
 	port = macvlan_port_get_rcu(skb->dev);
 	if (is_multicast_ether_addr(eth->h_dest)) {
-		skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
+		skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
 		if (!skb)
 			return RX_HANDLER_CONSUMED;
 		eth = eth_hdr(skb);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index edd7734..248478c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -1111,10 +1111,10 @@
 		return 0;
 
 	case TUNSETSNDBUF:
-		if (get_user(u, up))
+		if (get_user(s, sp))
 			return -EFAULT;
 
-		q->sk.sk_sndbuf = u;
+		q->sk.sk_sndbuf = s;
 		return 0;
 
 	case TUNGETVNETHDRSZ:
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index d8757bf..a9acf71 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -61,11 +61,21 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Intel Corporation");
 
+/* Time in usecs for tx resource reaper */
+static unsigned int tx_time = 1;
+
+/* Number of descriptors to free before resuming tx */
+static unsigned int tx_start = 10;
+
+/* Number of descriptors still available before stopping upper layer tx */
+static unsigned int tx_stop = 5;
+
 struct ntb_netdev {
 	struct list_head list;
 	struct pci_dev *pdev;
 	struct net_device *ndev;
 	struct ntb_transport_qp *qp;
+	struct timer_list tx_timer;
 };
 
 #define	NTB_TX_TIMEOUT_MS	1000
@@ -136,11 +146,42 @@
 	}
 }
 
+static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
+				      struct ntb_transport_qp *qp, int size)
+{
+	struct ntb_netdev *dev = netdev_priv(netdev);
+
+	netif_stop_queue(netdev);
+	/* Make sure to see the latest value of ntb_transport_tx_free_entry()
+	 * since the queue was last started.
+	 */
+	smp_mb();
+
+	if (likely(ntb_transport_tx_free_entry(qp) < size)) {
+		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
+		return -EBUSY;
+	}
+
+	netif_start_queue(netdev);
+	return 0;
+}
+
+static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
+				    struct ntb_transport_qp *qp, int size)
+{
+	if (netif_queue_stopped(ndev) ||
+	    (ntb_transport_tx_free_entry(qp) >= size))
+		return 0;
+
+	return __ntb_netdev_maybe_stop_tx(ndev, qp, size);
+}
+
 static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
 				  void *data, int len)
 {
 	struct net_device *ndev = qp_data;
 	struct sk_buff *skb;
+	struct ntb_netdev *dev = netdev_priv(ndev);
 
 	skb = data;
 	if (!skb || !ndev)
@@ -155,6 +196,15 @@
 	}
 
 	dev_kfree_skb(skb);
+
+	if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
+		/* Make sure anybody stopping the queue after this sees the new
+		 * value of ntb_transport_tx_free_entry()
+		 */
+		smp_mb();
+		if (netif_queue_stopped(ndev))
+			netif_wake_queue(ndev);
+	}
 }
 
 static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
@@ -163,10 +213,15 @@
 	struct ntb_netdev *dev = netdev_priv(ndev);
 	int rc;
 
+	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);
+
 	rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
 	if (rc)
 		goto err;
 
+	/* check for next submit */
+	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);
+
 	return NETDEV_TX_OK;
 
 err:
@@ -175,6 +230,23 @@
 	return NETDEV_TX_BUSY;
 }
 
+static void ntb_netdev_tx_timer(unsigned long data)
+{
+	struct net_device *ndev = (struct net_device *)data;
+	struct ntb_netdev *dev = netdev_priv(ndev);
+
+	if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
+		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
+	} else {
+		/* Make sure anybody stopping the queue after this sees the new
+		 * value of ntb_transport_tx_free_entry()
+		 */
+		smp_mb();
+		if (netif_queue_stopped(ndev))
+			netif_wake_queue(ndev);
+	}
+}
+
 static int ntb_netdev_open(struct net_device *ndev)
 {
 	struct ntb_netdev *dev = netdev_priv(ndev);
@@ -197,8 +269,11 @@
 		}
 	}
 
+	setup_timer(&dev->tx_timer, ntb_netdev_tx_timer, (unsigned long)ndev);
+
 	netif_carrier_off(ndev);
 	ntb_transport_link_up(dev->qp);
+	netif_start_queue(ndev);
 
 	return 0;
 
@@ -219,6 +294,8 @@
 	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
 		dev_kfree_skb(skb);
 
+	del_timer_sync(&dev->tx_timer);
+
 	return 0;
 }
 
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index c5ad98a..9d097ae 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -69,20 +69,39 @@
 	---help---
 	  Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs
 
+config BCM_NET_PHYLIB
+	tristate
+
 config BROADCOM_PHY
 	tristate "Drivers for Broadcom PHYs"
+	select BCM_NET_PHYLIB
 	---help---
 	  Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
 	  BCM5481 and BCM5482 PHYs.
 
+config BCM_CYGNUS_PHY
+	tristate "Drivers for Broadcom Cygnus SoC internal PHY"
+	depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+	depends on MDIO_BCM_IPROC
+	select BCM_NET_PHYLIB
+	---help---
+	  This PHY driver is for the 1G internal PHYs of the Broadcom
+	  Cygnus Family SoC.
+
+	  Currently supports internal PHYs used in the BCM11300,
+	  BCM11320, BCM11350, BCM11360, BCM58300, BCM58302,
+	  BCM58303 & BCM58305 Broadcom Cygnus SoCs.
+
 config BCM63XX_PHY
 	tristate "Drivers for Broadcom 63xx SOCs internal PHY"
 	depends on BCM63XX
+	select BCM_NET_PHYLIB
 	---help---
 	  Currently supports the 6348 and 6358 PHYs.
 
 config BCM7XXX_PHY
 	tristate "Drivers for Broadcom 7xxx SOCs internal PHYs"
+	select BCM_NET_PHYLIB
 	---help---
 	  Currently supports the BCM7366, BCM7439, BCM7445, and
 	  40nm and 65nm generation of BCM7xxx Set Top Box SoCs.
@@ -225,6 +244,15 @@
 	  This hardware can be found in the Broadcom GENET Ethernet MAC
 	  controllers as well as some Broadcom Ethernet switches such as the
 	  Starfighter 2 switches.
+
+config MDIO_BCM_IPROC
+	tristate "Broadcom iProc MDIO bus controller"
+	depends on ARCH_BCM_IPROC || COMPILE_TEST
+	depends on HAS_IOMEM && OF_MDIO
+	help
+	  This module provides a driver for the MDIO busses found in the
+	  Broadcom iProc SoCs.
+
 endif # PHYLIB
 
 config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 87f079c..7655d47 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -12,10 +12,12 @@
 obj-$(CONFIG_SMSC_PHY)		+= smsc.o
 obj-$(CONFIG_TERANETICS_PHY)	+= teranetics.o
 obj-$(CONFIG_VITESSE_PHY)	+= vitesse.o
+obj-$(CONFIG_BCM_NET_PHYLIB)	+= bcm-phy-lib.o
 obj-$(CONFIG_BROADCOM_PHY)	+= broadcom.o
 obj-$(CONFIG_BCM63XX_PHY)	+= bcm63xx.o
 obj-$(CONFIG_BCM7XXX_PHY)	+= bcm7xxx.o
 obj-$(CONFIG_BCM87XX_PHY)	+= bcm87xx.o
+obj-$(CONFIG_BCM_CYGNUS_PHY)	+= bcm-cygnus.o
 obj-$(CONFIG_ICPLUS_PHY)	+= icplus.o
 obj-$(CONFIG_REALTEK_PHY)	+= realtek.o
 obj-$(CONFIG_LSI_ET1011C_PHY)	+= et1011c.o
@@ -38,3 +40,4 @@
 obj-$(CONFIG_MDIO_MOXART)	+= mdio-moxart.o
 obj-$(CONFIG_MDIO_BCM_UNIMAC)	+= mdio-bcm-unimac.o
 obj-$(CONFIG_MICROCHIP_PHY)	+= microchip.o
+obj-$(CONFIG_MDIO_BCM_IPROC)	+= mdio-bcm-iproc.o
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
new file mode 100644
index 0000000..49bbc68
--- /dev/null
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Broadcom Cygnus SoC internal transceivers support. */
+#include "bcm-phy-lib.h"
+#include <linux/brcmphy.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+/* Broadcom Cygnus Phy specific registers */
+#define MII_BCM_CYGNUS_AFE_VDAC_ICTRL_0  0x91E5 /* VDAC Control register */
+
+static int bcm_cygnus_afe_config(struct phy_device *phydev)
+{
+	int rc;
+
+	/* ensure smdspclk is enabled */
+	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL, 0x0c30);
+	if (rc < 0)
+		return rc;
+
+	/* AFE_VDAC_ICTRL_0 bit 7:4 Iq=1100 for 1g 10bt, normal modes */
+	rc = bcm_phy_write_misc(phydev, 0x39, 0x01, 0xA7C8);
+	if (rc < 0)
+		return rc;
+
+	/* AFE_HPF_TRIM_OTHERS bit11=1, short cascode enable for all modes*/
+	rc = bcm_phy_write_misc(phydev, 0x3A, 0x00, 0x0803);
+	if (rc < 0)
+		return rc;
+
+	/* AFE_TX_CONFIG_1 bit 7:4 Iq=1100 for test modes */
+	rc = bcm_phy_write_misc(phydev, 0x3A, 0x01, 0xA740);
+	if (rc < 0)
+		return rc;
+
+	/* AFE TEMPSEN_OTHERS rcal_HT, rcal_LT 10000 */
+	rc = bcm_phy_write_misc(phydev, 0x3A, 0x03, 0x8400);
+	if (rc < 0)
+		return rc;
+
+	/* AFE_FUTURE_RSV bit 2:0 rccal <2:0>=100 */
+	rc = bcm_phy_write_misc(phydev, 0x3B, 0x00, 0x0004);
+	if (rc < 0)
+		return rc;
+
+	/* Adjust bias current trim to overcome digital offset */
+	rc = phy_write(phydev, MII_BRCM_CORE_BASE1E, 0x02);
+	if (rc < 0)
+		return rc;
+
+	/* make rcal=100, since rdb default is 000 */
+	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10);
+	if (rc < 0)
+		return rc;
+
+	/* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
+	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10);
+	if (rc < 0)
+		return rc;
+
+	/* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
+	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00);
+
+	return rc;
+}
+
+static int bcm_cygnus_config_init(struct phy_device *phydev)
+{
+	int reg, rc;
+
+	reg = phy_read(phydev, MII_BCM54XX_ECR);
+	if (reg < 0)
+		return reg;
+
+	/* Mask interrupts globally. */
+	reg |= MII_BCM54XX_ECR_IM;
+	rc = phy_write(phydev, MII_BCM54XX_ECR, reg);
+	if (rc)
+		return rc;
+
+	/* Unmask events of interest */
+	reg = ~(MII_BCM54XX_INT_DUPLEX |
+		MII_BCM54XX_INT_SPEED |
+		MII_BCM54XX_INT_LINK);
+	rc = phy_write(phydev, MII_BCM54XX_IMR, reg);
+	if (rc)
+		return rc;
+
+	/* Apply AFE settings for the PHY */
+	rc = bcm_cygnus_afe_config(phydev);
+	if (rc)
+		return rc;
+
+	/* Advertise EEE */
+	rc = bcm_phy_enable_eee(phydev);
+	if (rc)
+		return rc;
+
+	/* Enable APD */
+	return bcm_phy_enable_apd(phydev, false);
+}
+
+static int bcm_cygnus_resume(struct phy_device *phydev)
+{
+	int rc;
+
+	genphy_resume(phydev);
+
+	/* Re-initialize the PHY to apply AFE work-arounds and
+	 * configurations when coming out of suspend.
+	 */
+	rc = bcm_cygnus_config_init(phydev);
+	if (rc)
+		return rc;
+
+	/* restart auto negotiation with the new settings */
+	return genphy_config_aneg(phydev);
+}
+
+static struct phy_driver bcm_cygnus_phy_driver[] = {
+{
+	.phy_id        = PHY_ID_BCM_CYGNUS,
+	.phy_id_mask   = 0xfffffff0,
+	.name          = "Broadcom Cygnus PHY",
+	.features      = PHY_GBIT_FEATURES |
+			SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+	.config_init   = bcm_cygnus_config_init,
+	.config_aneg   = genphy_config_aneg,
+	.read_status   = genphy_read_status,
+	.ack_interrupt = bcm_phy_ack_intr,
+	.config_intr   = bcm_phy_config_intr,
+	.suspend       = genphy_suspend,
+	.resume        = bcm_cygnus_resume,
+} };
+
+static struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = {
+	{ PHY_ID_BCM_CYGNUS, 0xfffffff0, },
+	{ }
+};
+MODULE_DEVICE_TABLE(mdio, bcm_cygnus_phy_tbl);
+
+module_phy_driver(bcm_cygnus_phy_driver);
+
+MODULE_DESCRIPTION("Broadcom Cygnus internal PHY driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
new file mode 100644
index 0000000..dd79ea6
--- /dev/null
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "bcm-phy-lib.h"
+#include <linux/brcmphy.h>
+#include <linux/export.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+
+#define MII_BCM_CHANNEL_WIDTH     0x2000
+#define BCM_CL45VEN_EEE_ADV       0x3c
+
+int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val)
+{
+	int rc;
+
+	rc = phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
+	if (rc < 0)
+		return rc;
+
+	return phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_write_exp);
+
+int bcm_phy_read_exp(struct phy_device *phydev, u16 reg)
+{
+	int val;
+
+	val = phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
+	if (val < 0)
+		return val;
+
+	val = phy_read(phydev, MII_BCM54XX_EXP_DATA);
+
+	/* Restore default value.  It's O.K. if this write fails. */
+	phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_read_exp);
+
+int bcm_phy_write_misc(struct phy_device *phydev,
+		       u16 reg, u16 chl, u16 val)
+{
+	int rc;
+	int tmp;
+
+	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL,
+		       MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+	if (rc < 0)
+		return rc;
+
+	tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
+	tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
+	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
+	if (rc < 0)
+		return rc;
+
+	tmp = (chl * MII_BCM_CHANNEL_WIDTH) | reg;
+	rc = bcm_phy_write_exp(phydev, tmp, val);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_write_misc);
+
+int bcm_phy_read_misc(struct phy_device *phydev,
+		      u16 reg, u16 chl)
+{
+	int rc;
+	int tmp;
+
+	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL,
+		       MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+	if (rc < 0)
+		return rc;
+
+	tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
+	tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
+	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
+	if (rc < 0)
+		return rc;
+
+	tmp = (chl * MII_BCM_CHANNEL_WIDTH) | reg;
+	rc = bcm_phy_read_exp(phydev, tmp);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_read_misc);
+
+int bcm_phy_ack_intr(struct phy_device *phydev)
+{
+	int reg;
+
+	/* Clear pending interrupts.  */
+	reg = phy_read(phydev, MII_BCM54XX_ISR);
+	if (reg < 0)
+		return reg;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_ack_intr);
+
+int bcm_phy_config_intr(struct phy_device *phydev)
+{
+	int reg;
+
+	reg = phy_read(phydev, MII_BCM54XX_ECR);
+	if (reg < 0)
+		return reg;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		reg &= ~MII_BCM54XX_ECR_IM;
+	else
+		reg |= MII_BCM54XX_ECR_IM;
+
+	return phy_write(phydev, MII_BCM54XX_ECR, reg);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_config_intr);
+
+int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow)
+{
+	phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
+	return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
+}
+EXPORT_SYMBOL_GPL(bcm_phy_read_shadow);
+
+int bcm_phy_write_shadow(struct phy_device *phydev, u16 shadow,
+			 u16 val)
+{
+	return phy_write(phydev, MII_BCM54XX_SHD,
+			 MII_BCM54XX_SHD_WRITE |
+			 MII_BCM54XX_SHD_VAL(shadow) |
+			 MII_BCM54XX_SHD_DATA(val));
+}
+EXPORT_SYMBOL_GPL(bcm_phy_write_shadow);
+
+int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down)
+{
+	int val;
+
+	if (dll_pwr_down) {
+		val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
+		if (val < 0)
+			return val;
+
+		val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
+		bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val);
+	}
+
+	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_APD);
+	if (val < 0)
+		return val;
+
+	/* Clear APD bits */
+	val &= BCM_APD_CLR_MASK;
+
+	if (phydev->autoneg == AUTONEG_ENABLE)
+		val |= BCM54XX_SHD_APD_EN;
+	else
+		val |= BCM_NO_ANEG_APD_EN;
+
+	/* Enable energy detect single link pulse for easy wakeup */
+	val |= BCM_APD_SINGLELP_EN;
+
+	/* Enable Auto Power-Down (APD) for the PHY */
+	return bcm_phy_write_shadow(phydev, BCM54XX_SHD_APD, val);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
+
+int bcm_phy_enable_eee(struct phy_device *phydev)
+{
+	int val;
+
+	/* Enable EEE at PHY level */
+	val = phy_read_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
+				    MDIO_MMD_AN, phydev->addr);
+	if (val < 0)
+		return val;
+
+	val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X;
+
+	phy_write_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
+			       MDIO_MMD_AN,  phydev->addr, (u32)val);
+
+	/* Advertise EEE */
+	val = phy_read_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV,
+				    MDIO_MMD_AN, phydev->addr);
+	if (val < 0)
+		return val;
+
+	val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
+
+	phy_write_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV,
+			       MDIO_MMD_AN,  phydev->addr, (u32)val);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_enable_eee);
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
new file mode 100644
index 0000000..b2091c8
--- /dev/null
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BCM_PHY_LIB_H
+#define _LINUX_BCM_PHY_LIB_H
+
+#include <linux/phy.h>
+
+int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
+int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
+
+int bcm_phy_write_misc(struct phy_device *phydev,
+		       u16 reg, u16 chl, u16 value);
+int bcm_phy_read_misc(struct phy_device *phydev,
+		      u16 reg, u16 chl);
+
+int bcm_phy_write_shadow(struct phy_device *phydev, u16 shadow,
+			 u16 val);
+int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow);
+
+int bcm_phy_ack_intr(struct phy_device *phydev);
+int bcm_phy_config_intr(struct phy_device *phydev);
+
+int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down);
+
+int bcm_phy_enable_eee(struct phy_device *phydev);
+#endif /* _LINUX_BCM_PHY_LIB_H */
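
The header above is all a consumer driver needs; bcm-cygnus.c above and the reworked bcm63xx/bcm7xxx/broadcom drivers below follow the same pattern. A minimal sketch of such a consumer, with a placeholder PHY ID, register numbers and name (none of these belong to a real device):

#include "bcm-phy-lib.h"
#include <linux/brcmphy.h>
#include <linux/module.h>
#include <linux/phy.h>

static int example_config_init(struct phy_device *phydev)
{
	int rc;

	/* one-off expansion register write (0x1234 is made up) */
	rc = bcm_phy_write_exp(phydev, 0x1234, 0x0001);
	if (rc < 0)
		return rc;

	/* misc (AUX_CTL shadow) write: register 0x39, channel 1 */
	return bcm_phy_write_misc(phydev, 0x39, 0x01, 0xa7c8);
}

static struct phy_driver example_driver[] = {
{
	.phy_id		= 0x03625d00,	/* placeholder OUI/model */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Example Broadcom PHY",
	.features	= PHY_GBIT_FEATURES,
	.flags		= PHY_HAS_INTERRUPT,
	.config_init	= example_config_init,
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.ack_interrupt	= bcm_phy_ack_intr,
	.config_intr	= bcm_phy_config_intr,
} };

module_phy_driver(example_driver);

MODULE_LICENSE("GPL v2");
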
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 830ec31..86b2805 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -6,6 +6,7 @@
  *	as published by the Free Software Foundation; either version
  *	2 of the License, or (at your option) any later version.
  */
+#include "bcm-phy-lib.h"
 #include <linux/module.h>
 #include <linux/phy.h>
 
@@ -42,35 +43,6 @@
 	return phy_write(phydev, MII_BCM63XX_IR, reg);
 }
 
-static int bcm63xx_ack_interrupt(struct phy_device *phydev)
-{
-	int reg;
-
-	/* Clear pending interrupts.  */
-	reg = phy_read(phydev, MII_BCM63XX_IR);
-	if (reg < 0)
-		return reg;
-
-	return 0;
-}
-
-static int bcm63xx_config_intr(struct phy_device *phydev)
-{
-	int reg, err;
-
-	reg = phy_read(phydev, MII_BCM63XX_IR);
-	if (reg < 0)
-		return reg;
-
-	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-		reg &= ~MII_BCM63XX_IR_GMASK;
-	else
-		reg |= MII_BCM63XX_IR_GMASK;
-
-	err = phy_write(phydev, MII_BCM63XX_IR, reg);
-	return err;
-}
-
 static struct phy_driver bcm63xx_driver[] = {
 {
 	.phy_id		= 0x00406000,
@@ -82,8 +54,8 @@
 	.config_init	= bcm63xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm63xx_ack_interrupt,
-	.config_intr	= bcm63xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	/* same phy as above, with just a different OUI */
@@ -95,8 +67,8 @@
 	.config_init	= bcm63xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm63xx_ack_interrupt,
-	.config_intr	= bcm63xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 } };
 
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 6b701b3..03d4809 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -12,12 +12,12 @@
 #include <linux/module.h>
 #include <linux/phy.h>
 #include <linux/delay.h>
+#include "bcm-phy-lib.h"
 #include <linux/bitops.h>
 #include <linux/brcmphy.h>
 #include <linux/mdio.h>
 
 /* Broadcom BCM7xxx internal PHY registers */
-#define MII_BCM7XXX_CHANNEL_WIDTH	0x2000
 
 /* 40nm only register definitions */
 #define MII_BCM7XXX_100TX_AUX_CTL	0x10
@@ -25,7 +25,6 @@
 #define MII_BCM7XXX_100TX_DISC		0x14
 #define MII_BCM7XXX_AUX_MODE		0x1d
 #define  MII_BCM7XX_64CLK_MDIO		BIT(12)
-#define MII_BCM7XXX_CORE_BASE1E		0x1e
 #define MII_BCM7XXX_TEST		0x1f
 #define  MII_BCM7XXX_SHD_MODE_2		BIT(2)
 
@@ -46,39 +45,13 @@
 #define AFE_VDAC_OTHERS_0		MISC_ADDR(0x39, 3)
 #define AFE_HPF_TRIM_OTHERS		MISC_ADDR(0x3a, 0)
 
-#define CORE_EXPB0			0xb0
-
-static void phy_write_exp(struct phy_device *phydev,
-					u16 reg, u16 value)
-{
-	phy_write(phydev, MII_BCM54XX_EXP_SEL, MII_BCM54XX_EXP_SEL_ER | reg);
-	phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
-}
-
-static void phy_write_misc(struct phy_device *phydev,
-					u16 reg, u16 chl, u16 value)
-{
-	int tmp;
-
-	phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
-
-	tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
-	tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
-	phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
-
-	tmp = (chl * MII_BCM7XXX_CHANNEL_WIDTH) | reg;
-	phy_write(phydev, MII_BCM54XX_EXP_SEL, tmp);
-
-	phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
-}
-
 static void r_rc_cal_reset(struct phy_device *phydev)
 {
 	/* Reset R_CAL/RC_CAL Engine */
-	phy_write_exp(phydev, 0x00b0, 0x0010);
+	bcm_phy_write_exp(phydev, 0x00b0, 0x0010);
 
 	/* Disable Reset R_AL/RC_CAL Engine */
-	phy_write_exp(phydev, 0x00b0, 0x0000);
+	bcm_phy_write_exp(phydev, 0x00b0, 0x0000);
 }
 
 static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
@@ -86,38 +59,38 @@
 	/* Increase VCO range to prevent unlocking problem of PLL at low
 	 * temp
 	 */
-	phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
+	bcm_phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
 
 	/* Change Ki to 011 */
-	phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
+	bcm_phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
 
 	/* Disable loading of TVCO buffer to bandgap, set bandgap trim
 	 * to 111
 	 */
-	phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
+	bcm_phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
 
 	/* Adjust bias current trim by -3 */
-	phy_write_misc(phydev, DSP_TAP10, 0x690b);
+	bcm_phy_write_misc(phydev, DSP_TAP10, 0x690b);
 
 	/* Switch to CORE_BASE1E */
-	phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0xd);
+	phy_write(phydev, MII_BRCM_CORE_BASE1E, 0xd);
 
 	r_rc_cal_reset(phydev);
 
 	/* write AFE_RXCONFIG_0 */
-	phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
+	bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
 
 	/* write AFE_RXCONFIG_1 */
-	phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
+	bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
 
 	/* write AFE_RX_LP_COUNTER */
-	phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
+	bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
 
 	/* write AFE_HPF_TRIM_OTHERS */
-	phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
+	bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
 
 	/* write AFTE_TX_CONFIG */
-	phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
+	bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
 
 	return 0;
 }
@@ -125,36 +98,36 @@
 static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
 {
 	/* AFE_RXCONFIG_0 */
-	phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb15);
+	bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb15);
 
 	/* AFE_RXCONFIG_1 */
-	phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
+	bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
 
 	/* AFE_RXCONFIG_2, set rCal offset for HT=0 code and LT=-2 code */
-	phy_write_misc(phydev, AFE_RXCONFIG_2, 0x2003);
+	bcm_phy_write_misc(phydev, AFE_RXCONFIG_2, 0x2003);
 
 	/* AFE_RX_LP_COUNTER, set RX bandwidth to maximum */
-	phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
+	bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
 
 	/* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
-	phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
+	bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
 
 	/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
-	phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
+	bcm_phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
 
 	/* AFE_VDAC_OTHERS_0, set 1000BT Cidac=010 for all ports */
-	phy_write_misc(phydev, AFE_VDAC_OTHERS_0, 0xa020);
+	bcm_phy_write_misc(phydev, AFE_VDAC_OTHERS_0, 0xa020);
 
 	/* AFE_HPF_TRIM_OTHERS, set 100Tx/10BT to -4.5% swing and set rCal
 	 * offset for HT=0 code
 	 */
-	phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
+	bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
 
 	/* CORE_BASE1E, force trim to overwrite and set I_ext trim to 0000 */
-	phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0x0010);
+	phy_write(phydev, MII_BRCM_CORE_BASE1E, 0x0010);
 
 	/* DSP_TAP10, adjust bias current trim (+0% swing, +0 tick) */
-	phy_write_misc(phydev, DSP_TAP10, 0x011b);
+	bcm_phy_write_misc(phydev, DSP_TAP10, 0x011b);
 
 	/* Reset R_CAL/RC_CAL engine */
 	r_rc_cal_reset(phydev);
@@ -165,24 +138,24 @@
 static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
 {
 	/* AFE_RXCONFIG_1, provide more margin for INL/DNL measurement */
-	phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
+	bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
 
 	/* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
-	phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
+	bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
 
 	/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
-	phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
+	bcm_phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
 
 	/* AFE_HPF_TRIM_OTHERS, set 100Tx/10BT to -4.5% swing and set rCal
 	 * offset for HT=0 code
 	 */
-	phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
+	bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
 
 	/* CORE_BASE1E, force trim to overwrite and set I_ext trim to 0000 */
-	phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0x0010);
+	phy_write(phydev, MII_BRCM_CORE_BASE1E, 0x0010);
 
 	/* DSP_TAP10, adjust bias current trim (+0% swing, +0 tick) */
-	phy_write_misc(phydev, DSP_TAP10, 0x011b);
+	bcm_phy_write_misc(phydev, DSP_TAP10, 0x011b);
 
 	/* Reset R_CAL/RC_CAL engine */
 	r_rc_cal_reset(phydev);
@@ -190,53 +163,6 @@
 	return 0;
 }
 
-static int bcm7xxx_apd_enable(struct phy_device *phydev)
-{
-	int val;
-
-	/* Enable powering down of the DLL during auto-power down */
-	val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3);
-	if (val < 0)
-		return val;
-
-	val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
-	bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val);
-
-	/* Enable auto-power down */
-	val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);
-	if (val < 0)
-		return val;
-
-	val |= BCM54XX_SHD_APD_EN;
-	return bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD, val);
-}
-
-static int bcm7xxx_eee_enable(struct phy_device *phydev)
-{
-	int val;
-
-	val = phy_read_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
-				    MDIO_MMD_AN, phydev->addr);
-	if (val < 0)
-		return val;
-
-	/* Enable general EEE feature at the PHY level */
-	val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X;
-
-	phy_write_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
-			       MDIO_MMD_AN, phydev->addr, val);
-
-	/* Advertise supported modes */
-	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
-				    MDIO_MMD_AN, phydev->addr);
-
-	val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
-	phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
-			       MDIO_MMD_AN, phydev->addr, val);
-
-	return 0;
-}
-
 static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
 {
 	u8 rev = PHY_BRCM_7XXX_REV(phydev->dev_flags);
@@ -273,11 +199,11 @@
 	if (ret)
 		return ret;
 
-	ret = bcm7xxx_eee_enable(phydev);
+	ret = bcm_phy_enable_eee(phydev);
 	if (ret)
 		return ret;
 
-	return bcm7xxx_apd_enable(phydev);
+	return bcm_phy_enable_apd(phydev, true);
 }
 
 static int bcm7xxx_28nm_resume(struct phy_device *phydev)
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 9c71295..07a6119 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -14,6 +14,7 @@
  *	2 of the License, or (at your option) any later version.
  */
 
+#include "bcm-phy-lib.h"
 #include <linux/module.h>
 #include <linux/phy.h>
 #include <linux/brcmphy.h>
@@ -29,39 +30,6 @@
 MODULE_AUTHOR("Maciej W. Rozycki");
 MODULE_LICENSE("GPL");
 
-/* Indirect register access functions for the Expansion Registers */
-static int bcm54xx_exp_read(struct phy_device *phydev, u16 regnum)
-{
-	int val;
-
-	val = phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum);
-	if (val < 0)
-		return val;
-
-	val = phy_read(phydev, MII_BCM54XX_EXP_DATA);
-
-	/* Restore default value.  It's O.K. if this write fails. */
-	phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);
-
-	return val;
-}
-
-static int bcm54xx_exp_write(struct phy_device *phydev, u16 regnum, u16 val)
-{
-	int ret;
-
-	ret = phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum);
-	if (ret < 0)
-		return ret;
-
-	ret = phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
-
-	/* Restore default value.  It's O.K. if this write fails. */
-	phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);
-
-	return ret;
-}
-
 static int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val)
 {
 	return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val);
@@ -72,28 +40,28 @@
 {
 	int err;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH0,
+	err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_AADJ1CH0,
 				MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN |
 				MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF);
 	if (err < 0)
 		return err;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH3,
-					MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ);
+	err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_AADJ1CH3,
+				MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ);
 	if (err < 0)
 		return err;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75,
+	err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP75,
 				MII_BCM54XX_EXP_EXP75_VDACCTRL);
 	if (err < 0)
 		return err;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP96,
+	err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP96,
 				MII_BCM54XX_EXP_EXP96_MYST);
 	if (err < 0)
 		return err;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP97,
+	err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP97,
 				MII_BCM54XX_EXP_EXP97_MYST);
 
 	return err;
@@ -114,7 +82,7 @@
 	if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
 	    BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) {
 		/* Clear bit 9 to fix a phy interop issue. */
-		err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08,
+		err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP08,
 					MII_BCM54XX_EXP_EXP08_RJCT_2MHZ);
 		if (err < 0)
 			goto error;
@@ -129,12 +97,12 @@
 	if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
 		int val;
 
-		val = bcm54xx_exp_read(phydev, MII_BCM54XX_EXP_EXP75);
+		val = bcm_phy_read_exp(phydev, MII_BCM54XX_EXP_EXP75);
 		if (val < 0)
 			goto error;
 
 		val |= MII_BCM54XX_EXP_EXP75_CM_OSC;
-		err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, val);
+		err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP75, val);
 	}
 
 error:
@@ -159,7 +127,7 @@
 	    BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
 		return;
 
-	val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3);
+	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
 	if (val < 0)
 		return;
 
@@ -190,9 +158,9 @@
 		val |= BCM54XX_SHD_SCR3_TRDDAPD;
 
 	if (orig != val)
-		bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val);
+		bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val);
 
-	val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);
+	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_APD);
 	if (val < 0)
 		return;
 
@@ -204,7 +172,7 @@
 		val &= ~BCM54XX_SHD_APD_EN;
 
 	if (orig != val)
-		bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD, val);
+		bcm_phy_write_shadow(phydev, BCM54XX_SHD_APD, val);
 }
 
 static int bcm54xx_config_init(struct phy_device *phydev)
@@ -232,7 +200,7 @@
 	if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
 	     BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
 	    (phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE))
-		bcm54xx_shadow_write(phydev, BCM54XX_SHD_RGMII_MODE, 0);
+		bcm_phy_write_shadow(phydev, BCM54XX_SHD_RGMII_MODE, 0);
 
 	if ((phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) ||
 	    (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) ||
@@ -254,8 +222,8 @@
 		/*
 		 * Enable secondary SerDes and its use as an LED source
 		 */
-		reg = bcm54xx_shadow_read(phydev, BCM5482_SHD_SSD);
-		bcm54xx_shadow_write(phydev, BCM5482_SHD_SSD,
+		reg = bcm_phy_read_shadow(phydev, BCM5482_SHD_SSD);
+		bcm_phy_write_shadow(phydev, BCM5482_SHD_SSD,
 				     reg |
 				     BCM5482_SHD_SSD_LEDM |
 				     BCM5482_SHD_SSD_EN);
@@ -264,10 +232,10 @@
 		 * Enable SGMII slave mode and auto-detection
 		 */
 		reg = BCM5482_SSD_SGMII_SLAVE | MII_BCM54XX_EXP_SEL_SSD;
-		err = bcm54xx_exp_read(phydev, reg);
+		err = bcm_phy_read_exp(phydev, reg);
 		if (err < 0)
 			return err;
-		err = bcm54xx_exp_write(phydev, reg, err |
+		err = bcm_phy_write_exp(phydev, reg, err |
 					BCM5482_SSD_SGMII_SLAVE_EN |
 					BCM5482_SSD_SGMII_SLAVE_AD);
 		if (err < 0)
@@ -277,10 +245,10 @@
 		 * Disable secondary SerDes powerdown
 		 */
 		reg = BCM5482_SSD_1000BX_CTL | MII_BCM54XX_EXP_SEL_SSD;
-		err = bcm54xx_exp_read(phydev, reg);
+		err = bcm_phy_read_exp(phydev, reg);
 		if (err < 0)
 			return err;
-		err = bcm54xx_exp_write(phydev, reg,
+		err = bcm_phy_write_exp(phydev, reg,
 					err & ~BCM5482_SSD_1000BX_CTL_PWRDOWN);
 		if (err < 0)
 			return err;
@@ -288,15 +256,15 @@
 		/*
 		 * Select 1000BASE-X register set (primary SerDes)
 		 */
-		reg = bcm54xx_shadow_read(phydev, BCM5482_SHD_MODE);
-		bcm54xx_shadow_write(phydev, BCM5482_SHD_MODE,
+		reg = bcm_phy_read_shadow(phydev, BCM5482_SHD_MODE);
+		bcm_phy_write_shadow(phydev, BCM5482_SHD_MODE,
 				     reg | BCM5482_SHD_MODE_1000BX);
 
 		/*
 		 * LED1=ACTIVITYLED, LED3=LINKSPD[2]
 		 * (Use LED1 as secondary SerDes ACTIVITY LED)
 		 */
-		bcm54xx_shadow_write(phydev, BCM5482_SHD_LEDS1,
+		bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1,
 			BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_ACTIVITYLED) |
 			BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_LINKSPD2));
 
@@ -334,35 +302,6 @@
 	return err;
 }
 
-static int bcm54xx_ack_interrupt(struct phy_device *phydev)
-{
-	int reg;
-
-	/* Clear pending interrupts.  */
-	reg = phy_read(phydev, MII_BCM54XX_ISR);
-	if (reg < 0)
-		return reg;
-
-	return 0;
-}
-
-static int bcm54xx_config_intr(struct phy_device *phydev)
-{
-	int reg, err;
-
-	reg = phy_read(phydev, MII_BCM54XX_ECR);
-	if (reg < 0)
-		return reg;
-
-	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-		reg &= ~MII_BCM54XX_ECR_IM;
-	else
-		reg |= MII_BCM54XX_ECR_IM;
-
-	err = phy_write(phydev, MII_BCM54XX_ECR, reg);
-	return err;
-}
-
 static int bcm5481_config_aneg(struct phy_device *phydev)
 {
 	int ret;
@@ -519,8 +458,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM5421,
@@ -532,8 +471,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM5461,
@@ -545,8 +484,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM54616S,
@@ -558,8 +497,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM5464,
@@ -571,8 +510,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM5481,
@@ -584,8 +523,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= bcm5481_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM5482,
@@ -597,8 +536,8 @@
 	.config_init	= bcm5482_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= bcm5482_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM50610,
@@ -610,8 +549,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM50610M,
@@ -623,8 +562,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM57780,
@@ -636,8 +575,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCMAC131,
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index fb1299c..e23bf5b 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -220,7 +220,7 @@
 	struct fixed_mdio_bus *fmb = &platform_fmb;
 	struct fixed_phy *fp;
 
-	if (!phydev || !phydev->bus)
+	if (!phydev || phydev->bus != fmb->mii_bus)
 		return -EINVAL;
 
 	list_for_each_entry(fp, &fmb->phys, node) {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e6897b6..5de8d58 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -785,6 +785,7 @@
 	int adv;
 	int err;
 	int lpa;
+	int lpagb;
 	int status = 0;
 
 	/* Update the link, but return if there
@@ -802,10 +803,17 @@
 		if (lpa < 0)
 			return lpa;
 
+		lpagb = phy_read(phydev, MII_STAT1000);
+		if (lpagb < 0)
+			return lpagb;
+
 		adv = phy_read(phydev, MII_ADVERTISE);
 		if (adv < 0)
 			return adv;
 
+		phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) |
+					 mii_lpa_to_ethtool_lpa_t(lpa);
+
 		lpa &= adv;
 
 		if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
@@ -853,6 +861,7 @@
 			phydev->speed = SPEED_10;
 
 		phydev->pause = phydev->asym_pause = 0;
+		phydev->lp_advertising = 0;
 	}
 
 	return 0;
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
new file mode 100644
index 0000000..c0b4e65
--- /dev/null
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+
+#define IPROC_GPHY_MDCDIV    0x1a
+
+#define MII_CTRL_OFFSET      0x000
+
+#define MII_CTRL_DIV_SHIFT   0
+#define MII_CTRL_PRE_SHIFT   7
+#define MII_CTRL_BUSY_SHIFT  8
+
+#define MII_DATA_OFFSET      0x004
+#define MII_DATA_MASK        0xffff
+#define MII_DATA_TA_SHIFT    16
+#define MII_DATA_TA_VAL      2
+#define MII_DATA_RA_SHIFT    18
+#define MII_DATA_PA_SHIFT    23
+#define MII_DATA_OP_SHIFT    28
+#define MII_DATA_OP_WRITE    1
+#define MII_DATA_OP_READ     2
+#define MII_DATA_SB_SHIFT    30
+
+struct iproc_mdio_priv {
+	struct mii_bus *mii_bus;
+	void __iomem *base;
+};
+
+static inline int iproc_mdio_wait_for_idle(void __iomem *base)
+{
+	u32 val;
+	unsigned int timeout = 1000; /* loop for 1s */
+
+	do {
+		val = readl(base + MII_CTRL_OFFSET);
+		if ((val & BIT(MII_CTRL_BUSY_SHIFT)) == 0)
+			return 0;
+
+		usleep_range(1000, 2000);
+	} while (timeout--);
+
+	return -ETIMEDOUT;
+}
+
+static inline void iproc_mdio_config_clk(void __iomem *base)
+{
+	u32 val;
+
+	val = (IPROC_GPHY_MDCDIV << MII_CTRL_DIV_SHIFT) |
+		  BIT(MII_CTRL_PRE_SHIFT);
+	writel(val, base + MII_CTRL_OFFSET);
+}
+
+static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+	struct iproc_mdio_priv *priv = bus->priv;
+	u32 cmd;
+	int rc;
+
+	rc = iproc_mdio_wait_for_idle(priv->base);
+	if (rc)
+		return rc;
+
+	iproc_mdio_config_clk(priv->base);
+
+	/* Prepare the read operation */
+	cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
+		(reg << MII_DATA_RA_SHIFT) |
+		(phy_id << MII_DATA_PA_SHIFT) |
+		BIT(MII_DATA_SB_SHIFT) |
+		(MII_DATA_OP_READ << MII_DATA_OP_SHIFT);
+
+	writel(cmd, priv->base + MII_DATA_OFFSET);
+
+	rc = iproc_mdio_wait_for_idle(priv->base);
+	if (rc)
+		return rc;
+
+	cmd = readl(priv->base + MII_DATA_OFFSET) & MII_DATA_MASK;
+
+	return cmd;
+}
+
+static int iproc_mdio_write(struct mii_bus *bus, int phy_id,
+			    int reg, u16 val)
+{
+	struct iproc_mdio_priv *priv = bus->priv;
+	u32 cmd;
+	int rc;
+
+	rc = iproc_mdio_wait_for_idle(priv->base);
+	if (rc)
+		return rc;
+
+	iproc_mdio_config_clk(priv->base);
+
+	/* Prepare the write operation */
+	cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
+		(reg << MII_DATA_RA_SHIFT) |
+		(phy_id << MII_DATA_PA_SHIFT) |
+		BIT(MII_DATA_SB_SHIFT) |
+		(MII_DATA_OP_WRITE << MII_DATA_OP_SHIFT) |
+		((u32)(val) & MII_DATA_MASK);
+
+	writel(cmd, priv->base + MII_DATA_OFFSET);
+
+	rc = iproc_mdio_wait_for_idle(priv->base);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int iproc_mdio_probe(struct platform_device *pdev)
+{
+	struct iproc_mdio_priv *priv;
+	struct mii_bus *bus;
+	struct resource *res;
+	int rc;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->base)) {
+		dev_err(&pdev->dev, "failed to ioremap register\n");
+		return PTR_ERR(priv->base);
+	}
+
+	priv->mii_bus = mdiobus_alloc();
+	if (!priv->mii_bus) {
+		dev_err(&pdev->dev, "MDIO bus alloc failed\n");
+		return -ENOMEM;
+	}
+
+	bus = priv->mii_bus;
+	bus->priv = priv;
+	bus->name = "iProc MDIO bus";
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
+	bus->parent = &pdev->dev;
+	bus->read = iproc_mdio_read;
+	bus->write = iproc_mdio_write;
+
+	rc = of_mdiobus_register(bus, pdev->dev.of_node);
+	if (rc) {
+		dev_err(&pdev->dev, "MDIO bus registration failed\n");
+		goto err_iproc_mdio;
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	dev_info(&pdev->dev, "Broadcom iProc MDIO bus at 0x%p\n", priv->base);
+
+	return 0;
+
+err_iproc_mdio:
+	mdiobus_free(bus);
+	return rc;
+}
+
+static int iproc_mdio_remove(struct platform_device *pdev)
+{
+	struct iproc_mdio_priv *priv = platform_get_drvdata(pdev);
+
+	mdiobus_unregister(priv->mii_bus);
+	mdiobus_free(priv->mii_bus);
+
+	return 0;
+}
+
+static const struct of_device_id iproc_mdio_of_match[] = {
+	{ .compatible = "brcm,iproc-mdio", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, iproc_mdio_of_match);
+
+static struct platform_driver iproc_mdio_driver = {
+	.driver = {
+		.name = "iproc-mdio",
+		.of_match_table = iproc_mdio_of_match,
+	},
+	.probe = iproc_mdio_probe,
+	.remove = iproc_mdio_remove,
+};
+
+module_platform_driver(iproc_mdio_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom iProc MDIO bus controller");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:iproc-mdio");
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 6a52a7f..4bde5e7 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -244,6 +244,7 @@
 	{ .compatible = "brcm,unimac-mdio", },
 	{ /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, unimac_mdio_ids);
 
 static struct platform_driver unimac_mdio_driver = {
 	.driver = {
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 7dc21e5..3bc9f03 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -261,6 +261,7 @@
 	{ .compatible = "virtual,mdio-gpio", },
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, mdio_gpio_of_match);
 
 static struct platform_driver mdio_gpio_driver = {
 	.probe = mdio_gpio_probe,
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 4d4d25e..280c7c3 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -113,18 +113,18 @@
 	if (!parent_bus_node)
 		return -ENODEV;
 
-	parent_bus = of_mdio_find_bus(parent_bus_node);
-	if (parent_bus == NULL) {
-		ret_val = -EPROBE_DEFER;
-		goto err_parent_bus;
-	}
-
 	pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
 	if (pb == NULL) {
 		ret_val = -ENOMEM;
 		goto err_parent_bus;
 	}
 
+	parent_bus = of_mdio_find_bus(parent_bus_node);
+	if (parent_bus == NULL) {
+		ret_val = -EPROBE_DEFER;
+		goto err_parent_bus;
+	}
+
 	pb->switch_data = data;
 	pb->switch_fn = switch_fn;
 	pb->current_child = -1;
@@ -173,6 +173,10 @@
 		dev_info(dev, "Version " DRV_VERSION "\n");
 		return 0;
 	}
+
+	/* balance the reference of_mdio_find_bus() took */
+	put_device(&pb->mii_bus->dev);
+
 err_parent_bus:
 	of_node_put(parent_bus_node);
 	return ret_val;
@@ -189,6 +193,9 @@
 		mdiobus_free(cb->mii_bus);
 		cb = cb->next;
 	}
+
+	/* balance the reference of_mdio_find_bus() in mdio_mux_init() took */
+	put_device(&pb->mii_bus->dev);
 }
 EXPORT_SYMBOL_GPL(mdio_mux_uninit);
 
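
With this reordering, of_mdio_find_bus() is only called once the mux state has been allocated, and the reference it takes on the parent bus device is now dropped both on the error path and in mdio_mux_uninit(). Any other caller of of_mdio_find_bus() is expected to follow the same pattern; a rough sketch, assuming a caller that only needs the bus temporarily:

	struct mii_bus *parent;

	parent = of_mdio_find_bus(np);	/* takes a device reference */
	if (!parent)
		return -EPROBE_DEFER;

	/* ... look up or attach PHYs on the parent bus ... */

	put_device(&parent->dev);	/* drop the reference when done */
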
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 02a4615..12f44c5 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -167,7 +167,9 @@
  * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
  * @mdio_bus_np: Pointer to the mii_bus.
  *
- * Returns a pointer to the mii_bus, or NULL if none found.
+ * Returns a reference to the mii_bus, or NULL if none found.  The
+ * embedded struct device will have its reference count incremented,
+ * and this must be put once the bus is finished with.
  *
  * Because the association of a device_node and mii_bus is made via
  * of_mdiobus_register(), the mii_bus cannot be found before it is
@@ -234,15 +236,18 @@
 #endif
 
 /**
- * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
+ * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
  * @bus: target mii_bus
+ * @owner: module containing bus accessor functions
  *
  * Description: Called by a bus driver to bring up all the PHYs
- *   on a given bus, and attach them to the bus.
+ *   on a given bus, and attach them to the bus. Drivers should use
+ *   mdiobus_register() rather than __mdiobus_register() unless they
+ *   need to pass a specific owner module.
  *
  * Returns 0 on success or < 0 on error.
  */
-int mdiobus_register(struct mii_bus *bus)
+int __mdiobus_register(struct mii_bus *bus, struct module *owner)
 {
 	int i, err;
 
@@ -253,6 +258,7 @@
 	BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
 	       bus->state != MDIOBUS_UNREGISTERED);
 
+	bus->owner = owner;
 	bus->dev.parent = bus->parent;
 	bus->dev.class = &mdio_bus_class;
 	bus->dev.groups = NULL;
@@ -288,13 +294,16 @@
 
 error:
 	while (--i >= 0) {
-		if (bus->phy_map[i])
-			device_unregister(&bus->phy_map[i]->dev);
+		struct phy_device *phydev = bus->phy_map[i];
+		if (phydev) {
+			phy_device_remove(phydev);
+			phy_device_free(phydev);
+		}
 	}
 	device_del(&bus->dev);
 	return err;
 }
-EXPORT_SYMBOL(mdiobus_register);
+EXPORT_SYMBOL(__mdiobus_register);
 
 void mdiobus_unregister(struct mii_bus *bus)
 {
@@ -304,9 +313,11 @@
 	bus->state = MDIOBUS_UNREGISTERED;
 
 	for (i = 0; i < PHY_MAX_ADDR; i++) {
-		if (bus->phy_map[i])
-			device_unregister(&bus->phy_map[i]->dev);
-		bus->phy_map[i] = NULL;
+		struct phy_device *phydev = bus->phy_map[i];
+		if (phydev) {
+			phy_device_remove(phydev);
+			phy_device_free(phydev);
+		}
 	}
 	device_del(&bus->dev);
 }
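
Only __mdiobus_register() is exported here; existing bus drivers keep calling mdiobus_register(). The natural counterpart, not visible in this hunk and therefore an assumption, is a wrapper in include/linux/phy.h that records the calling module:

/* assumed companion definition in include/linux/phy.h */
int __mdiobus_register(struct mii_bus *bus, struct module *owner);
#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)

phy_attach_direct() can then pin bus->owner with try_module_get() for as long as a PHY on that bus is attached (see the phy_device.c changes below).
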
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c0f2111..3833891 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -384,6 +384,24 @@
 EXPORT_SYMBOL(phy_device_register);
 
 /**
+ * phy_device_remove - Remove a previously registered phy device from the MDIO bus
+ * @phydev: phy_device structure to remove
+ *
+ * This doesn't free the phy_device itself; it merely reverses the effects
+ * of phy_device_register(). Use phy_device_free() to free the device
+ * after calling this function.
+ */
+void phy_device_remove(struct phy_device *phydev)
+{
+	struct mii_bus *bus = phydev->bus;
+	int addr = phydev->addr;
+
+	device_del(&phydev->dev);
+	bus->phy_map[addr] = NULL;
+}
+EXPORT_SYMBOL(phy_device_remove);
+
+/**
  * phy_find_first - finds the first PHY device on the bus
  * @bus: the target MII bus
  */
@@ -578,14 +596,22 @@
  *     generic driver is used.  The phy_device is given a ptr to
  *     the attaching device, and given a callback for link status
  *     change.  The phy_device is returned to the attaching driver.
+ *     This function takes a reference on the phy device.
  */
 int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 		      u32 flags, phy_interface_t interface)
 {
+	struct mii_bus *bus = phydev->bus;
 	struct device *d = &phydev->dev;
-	struct module *bus_module;
 	int err;
 
+	if (!try_module_get(bus->owner)) {
+		dev_err(&dev->dev, "failed to get the bus module\n");
+		return -EIO;
+	}
+
+	get_device(d);
+
 	/* Assume that if there is no driver, that it doesn't
 	 * exist, and we should use the genphy driver.
 	 */
@@ -600,20 +626,13 @@
 			err = device_bind_driver(d);
 
 		if (err)
-			return err;
+			goto error;
 	}
 
 	if (phydev->attached_dev) {
 		dev_err(&dev->dev, "PHY already attached\n");
-		return -EBUSY;
-	}
-
-	/* Increment the bus module reference count */
-	bus_module = phydev->bus->dev.driver ?
-		     phydev->bus->dev.driver->owner : NULL;
-	if (!try_module_get(bus_module)) {
-		dev_err(&dev->dev, "failed to get the bus module\n");
-		return -EIO;
+		err = -EBUSY;
+		goto error;
 	}
 
 	phydev->attached_dev = dev;
@@ -636,6 +655,11 @@
 		phy_resume(phydev);
 
 	return err;
+
+error:
+	put_device(d);
+	module_put(bus->owner);
+	return err;
 }
 EXPORT_SYMBOL(phy_attach_direct);
 
@@ -677,14 +701,15 @@
 /**
  * phy_detach - detach a PHY device from its network device
  * @phydev: target phy_device struct
+ *
+ * This detaches the phy device from its network device and the phy
+ * driver, and drops the reference count taken in phy_attach_direct().
  */
 void phy_detach(struct phy_device *phydev)
 {
+	struct mii_bus *bus;
 	int i;
 
-	if (phydev->bus->dev.driver)
-		module_put(phydev->bus->dev.driver->owner);
-
 	phydev->attached_dev->phydev = NULL;
 	phydev->attached_dev = NULL;
 	phy_suspend(phydev);
@@ -700,6 +725,15 @@
 			break;
 		}
 	}
+
+	/*
+	 * The phydev might go away on the put_device() below, so avoid
+	 * a use-after-free bug by reading the underlying bus first.
+	 */
+	bus = phydev->bus;
+
+	put_device(&phydev->dev);
+	module_put(bus->owner);
 }
 EXPORT_SYMBOL(phy_detach);
 
@@ -1205,6 +1239,44 @@
 	return 0;
 }
 
+static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+{
+	/* The default values for phydev->supported are provided by the PHY
+	 * driver "features" member, we want to reset to sane defaults first
+	 * before supporting higher speeds.
+	 */
+	phydev->supported &= PHY_DEFAULT_FEATURES;
+
+	switch (max_speed) {
+	default:
+		return -ENOTSUPP;
+	case SPEED_1000:
+		phydev->supported |= PHY_1000BT_FEATURES;
+		/* fall through */
+	case SPEED_100:
+		phydev->supported |= PHY_100BT_FEATURES;
+		/* fall through */
+	case SPEED_10:
+		phydev->supported |= PHY_10BT_FEATURES;
+	}
+
+	return 0;
+}
+
+int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
+{
+	int err;
+
+	err = __set_phy_supported(phydev, max_speed);
+	if (err)
+		return err;
+
+	phydev->advertising = phydev->supported;
+
+	return 0;
+}
+EXPORT_SYMBOL(phy_set_max_speed);
+
 static void of_set_phy_supported(struct phy_device *phydev)
 {
 	struct device_node *node = phydev->dev.of_node;
@@ -1216,25 +1288,8 @@
 	if (!node)
 		return;
 
-	if (!of_property_read_u32(node, "max-speed", &max_speed)) {
-		/* The default values for phydev->supported are provided by the PHY
-		 * driver "features" member, we want to reset to sane defaults fist
-		 * before supporting higher speeds.
-		 */
-		phydev->supported &= PHY_DEFAULT_FEATURES;
-
-		switch (max_speed) {
-		default:
-			return;
-
-		case SPEED_1000:
-			phydev->supported |= PHY_1000BT_FEATURES;
-		case SPEED_100:
-			phydev->supported |= PHY_100BT_FEATURES;
-		case SPEED_10:
-			phydev->supported |= PHY_10BT_FEATURES;
-		}
-	}
+	if (!of_property_read_u32(node, "max-speed", &max_speed))
+		__set_phy_supported(phydev, max_speed);
 }
 
 /**
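
phy_set_max_speed() gives MAC drivers the same clamping that the devicetree "max-speed" property already provided, without needing a DT node. A hedged usage sketch from a hypothetical MAC driver whose MII interface tops out at 100 Mbit/s (ndev and phydev come from the surrounding, hypothetical driver):

	err = phy_set_max_speed(phydev, SPEED_100);
	if (err)
		netdev_warn(ndev, "failed to cap PHY speed: %d\n", err);
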
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 17cad18..76cad71 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -66,7 +66,6 @@
 #define PHY_ID_VSC8244			0x000fc6c0
 #define PHY_ID_VSC8514			0x00070670
 #define PHY_ID_VSC8574			0x000704a0
-#define PHY_ID_VSC8641			0x00070431
 #define PHY_ID_VSC8662			0x00070660
 #define PHY_ID_VSC8221			0x000fc550
 #define PHY_ID_VSC8211			0x000fc4b0
@@ -273,18 +272,6 @@
 	.config_intr    = &vsc82xx_config_intr,
 	.driver         = { .owner = THIS_MODULE,},
 }, {
-	.phy_id         = PHY_ID_VSC8641,
-	.name           = "Vitesse VSC8641",
-	.phy_id_mask    = 0x000ffff0,
-	.features       = PHY_GBIT_FEATURES,
-	.flags          = PHY_HAS_INTERRUPT,
-	.config_init    = &vsc824x_config_init,
-	.config_aneg    = &vsc82x4_config_aneg,
-	.read_status    = &genphy_read_status,
-	.ack_interrupt  = &vsc824x_ack_interrupt,
-	.config_intr    = &vsc82xx_config_intr,
-	.driver         = { .owner = THIS_MODULE,},
-}, {
 	.phy_id         = PHY_ID_VSC8662,
 	.name           = "Vitesse VSC8662",
 	.phy_id_mask    = 0x000ffff0,
@@ -331,7 +318,6 @@
 	{ PHY_ID_VSC8244, 0x000fffc0 },
 	{ PHY_ID_VSC8514, 0x000ffff0 },
 	{ PHY_ID_VSC8574, 0x000ffff0 },
-	{ PHY_ID_VSC8641, 0x000ffff0 },
 	{ PHY_ID_VSC8662, 0x000ffff0 },
 	{ PHY_ID_VSC8221, 0x000ffff0 },
 	{ PHY_ID_VSC8211, 0x000ffff0 },
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0481daf..ed00446 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2755,6 +2755,7 @@
 	 */
 	dev_net_set(dev, net);
 
+	rtnl_lock();
 	mutex_lock(&pn->all_ppp_mutex);
 
 	if (unit < 0) {
@@ -2785,7 +2786,7 @@
 	ppp->file.index = unit;
 	sprintf(dev->name, "ppp%d", unit);
 
-	ret = register_netdev(dev);
+	ret = register_netdevice(dev);
 	if (ret != 0) {
 		unit_put(&pn->units_idr, unit);
 		netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
@@ -2797,6 +2798,7 @@
 
 	atomic_inc(&ppp_unit_count);
 	mutex_unlock(&pn->all_ppp_mutex);
+	rtnl_unlock();
 
 	*retp = 0;
 	return ppp;
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 686f37d..fc69e41 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -169,6 +169,7 @@
 {
 	struct sock *sk = (struct sock *) chan->private;
 	struct pppox_sock *po = pppox_sk(sk);
+	struct net *net = sock_net(sk);
 	struct pptp_opt *opt = &po->proto.pptp;
 	struct pptp_gre_header *hdr;
 	unsigned int header_len = sizeof(*hdr);
@@ -187,7 +188,7 @@
 	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
 		goto tx_error;
 
-	rt = ip_route_output_ports(sock_net(sk), &fl4, NULL,
+	rt = ip_route_output_ports(net, &fl4, NULL,
 				   opt->dst_addr.sin_addr.s_addr,
 				   opt->src_addr.sin_addr.s_addr,
 				   0, 0, IPPROTO_GRE,
@@ -279,10 +280,10 @@
 	nf_reset(skb);
 
 	skb->ip_summed = CHECKSUM_NONE;
-	ip_select_ident(sock_net(sk), skb, NULL);
+	ip_select_ident(net, skb, NULL);
 	ip_send_check(iph);
 
-	ip_local_out(skb);
+	ip_local_out(net, skb->sk, skb);
 	return 1;
 
 tx_error:
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 976aa97..b1878fa 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -858,7 +858,7 @@
 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
 		goto drop;
 
-	if (skb->sk) {
+	if (skb->sk && sk_fullsock(skb->sk)) {
 		sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
 		sw_tx_timestamp(skb);
 	}
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 167cfc5..3a8a36c 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -585,4 +585,15 @@
 
 	  http://ubuntuforums.org/showpost.php?p=10589647&postcount=17
 
+config USB_NET_CH9200
+	tristate "QinHeng CH9200 USB ethernet support"
+	depends on USB_USBNET
+	select MII
+	help
+	  Choose this option if you have a USB ethernet adapter with a QinHeng
+	  CH9200 chipset.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ch9200.
+
 endif # USB_NET_DRIVERS
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index cf6a0e6..b5f0406 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -38,4 +38,4 @@
 obj-$(CONFIG_USB_VL600)		+= lg-vl600.o
 obj-$(CONFIG_USB_NET_QMI_WWAN)	+= qmi_wwan.o
 obj-$(CONFIG_USB_NET_CDC_MBIM)	+= cdc_mbim.o
-
+obj-$(CONFIG_USB_NET_CH9200)	+= ch9200.o
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 5d049d0..a2d3ea6 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -168,7 +168,7 @@
 struct asix_rx_fixup_info {
 	struct sk_buff *ax_skb;
 	u32 header;
-	u16 size;
+	u16 remaining;
 	bool split_head;
 };
 
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 75d6f26..a186b0a 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -54,69 +54,101 @@
 			   struct asix_rx_fixup_info *rx)
 {
 	int offset = 0;
+	u16 size;
+
+	/* When an Ethernet frame spans multiple URB socket buffers,
+	 * do a sanity test for the Data header synchronisation.
+	 * Attempt to detect whether the previous socket buffer was truncated
+	 * or a socket buffer was missing. These situations cause a
+	 * discontinuity in the data stream, so we must avoid appending bad
+	 * data to the end of the current netdev socket buffer.
+	 * Also avoid unnecessarily discarding a good current netdev socket
+	 * buffer.
+	 */
+	if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
+		offset = ((rx->remaining + 1) & 0xfffe) + sizeof(u32);
+		rx->header = get_unaligned_le32(skb->data + offset);
+		offset = 0;
+
+		size = (u16)(rx->header & 0x7ff);
+		if (size != ((~rx->header >> 16) & 0x7ff)) {
+			netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
+				   rx->remaining);
+			if (rx->ax_skb) {
+				kfree_skb(rx->ax_skb);
+				rx->ax_skb = NULL;
+				/* Discard the incomplete netdev Ethernet frame
+				 * and assume the Data header is at the start of
+				 * the current URB socket buffer.
+				 */
+			}
+			rx->remaining = 0;
+		}
+	}
 
 	while (offset + sizeof(u16) <= skb->len) {
-		u16 remaining = 0;
+		u16 copy_length;
 		unsigned char *data;
 
-		if (!rx->size) {
-			if ((skb->len - offset == sizeof(u16)) ||
-			    rx->split_head) {
-				if(!rx->split_head) {
-					rx->header = get_unaligned_le16(
-							skb->data + offset);
-					rx->split_head = true;
-					offset += sizeof(u16);
-					break;
-				} else {
-					rx->header |= (get_unaligned_le16(
-							skb->data + offset)
-							<< 16);
-					rx->split_head = false;
-					offset += sizeof(u16);
-				}
+		if (!rx->remaining) {
+			if (skb->len - offset == sizeof(u16)) {
+				rx->header = get_unaligned_le16(
+						skb->data + offset);
+				rx->split_head = true;
+				offset += sizeof(u16);
+				break;
+			}
+
+			if (rx->split_head == true) {
+				rx->header |= (get_unaligned_le16(
+						skb->data + offset) << 16);
+				rx->split_head = false;
+				offset += sizeof(u16);
 			} else {
 				rx->header = get_unaligned_le32(skb->data +
 								offset);
 				offset += sizeof(u32);
 			}
 
-			/* get the packet length */
-			rx->size = (u16) (rx->header & 0x7ff);
-			if (rx->size != ((~rx->header >> 16) & 0x7ff)) {
+			/* take frame length from Data header 32-bit word */
+			size = (u16)(rx->header & 0x7ff);
+			if (size != ((~rx->header >> 16) & 0x7ff)) {
 				netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
 					   rx->header, offset);
-				rx->size = 0;
 				return 0;
 			}
-			rx->ax_skb = netdev_alloc_skb_ip_align(dev->net,
-							       rx->size);
-			if (!rx->ax_skb)
+			if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
+				netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
+					   size);
 				return 0;
+			}
+
+			/* Sometimes may fail to get a netdev socket buffer but
+			 * continue to process the URB socket buffer so that
+			 * synchronisation of the Ethernet frame Data header
+			 * word is maintained.
+			 */
+			rx->ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
+
+			rx->remaining = size;
 		}
 
-		if (rx->size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
-			netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
-				   rx->size);
-			kfree_skb(rx->ax_skb);
-			rx->ax_skb = NULL;
-			rx->size = 0U;
-
-			return 0;
+		if (rx->remaining > skb->len - offset) {
+			copy_length = skb->len - offset;
+			rx->remaining -= copy_length;
+		} else {
+			copy_length = rx->remaining;
+			rx->remaining = 0;
 		}
 
-		if (rx->size > skb->len - offset) {
-			remaining = rx->size - (skb->len - offset);
-			rx->size = skb->len - offset;
+		if (rx->ax_skb) {
+			data = skb_put(rx->ax_skb, copy_length);
+			memcpy(data, skb->data + offset, copy_length);
+			if (!rx->remaining)
+				usbnet_skb_return(dev, rx->ax_skb);
 		}
 
-		data = skb_put(rx->ax_skb, rx->size);
-		memcpy(data, skb->data + offset, rx->size);
-		if (!remaining)
-			usbnet_skb_return(dev, rx->ax_skb);
-
-		offset += (rx->size + 1) & 0xfffe;
-		rx->size = remaining;
+		offset += (copy_length + 1) & 0xfffe;
 	}
 
 	if (skb->len != offset) {
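
The sanity test above relies on the layout of the ASIX 32-bit Data header: the frame length lives in bits 10:0 and its one's complement is repeated in bits 26:16. A small illustrative helper (the name is hypothetical) showing the same extraction and check that the fixup code above performs:

#include <linux/errno.h>
#include <linux/types.h>

static inline int example_asix_hdr_len(u32 header)
{
	u16 size = (u16)(header & 0x7ff);

	/* the complemented copy in bits 26:16 must match */
	if (size != ((~header >> 16) & 0x7ff))
		return -EINVAL;	/* header synchronisation lost */

	return size;
}
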
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index efc18e0..bbde988 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -342,7 +342,7 @@
 	in6_dev_put(in6_dev);
 
 	/* ipv6_stub != NULL if in6_dev_get returned an inet6_dev */
-	ipv6_stub->ndisc_send_na(netdev, NULL, &iph->saddr, &msg->target,
+	ipv6_stub->ndisc_send_na(netdev, &iph->saddr, &msg->target,
 				 is_router /* router */,
 				 true /* solicited */,
 				 false /* override */,
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
new file mode 100644
index 0000000..5e151e6
--- /dev/null
+++ b/drivers/net/usb/ch9200.c
@@ -0,0 +1,432 @@
+/*
+ * USB 10M/100M ethernet adapter
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
+
+#define CH9200_VID		0x1A86
+#define CH9200_PID_E092		0xE092
+
+#define CTRL_TIMEOUT_MS		1000
+
+#define CONTROL_TIMEOUT_MS 1000
+
+#define REQUEST_READ	0x0E
+#define REQUEST_WRITE	0x0F
+
+/* Address space:
+ * 00-63 : MII
+ * 64-128: MAC
+ *
+ * Note: all accesses must be 16-bit
+ */
+
+#define MAC_REG_CTRL 64
+#define MAC_REG_STATUS 66
+#define MAC_REG_INTERRUPT_MASK 68
+#define MAC_REG_PHY_COMMAND 70
+#define MAC_REG_PHY_DATA 72
+#define MAC_REG_STATION_L 74
+#define MAC_REG_STATION_M 76
+#define MAC_REG_STATION_H 78
+#define MAC_REG_HASH_L 80
+#define MAC_REG_HASH_M1 82
+#define MAC_REG_HASH_M2 84
+#define MAC_REG_HASH_H 86
+#define MAC_REG_THRESHOLD 88
+#define MAC_REG_FIFO_DEPTH 90
+#define MAC_REG_PAUSE 92
+#define MAC_REG_FLOW_CONTROL 94
+
+/* Control register bits
+ *
+ * Note: bits 13 and 15 are reserved
+ */
+#define LOOPBACK		(0x01 << 14)
+#define BASE100X		(0x01 << 12)
+#define MBPS_10			(0x01 << 11)
+#define DUPLEX_MODE		(0x01 << 10)
+#define PAUSE_FRAME		(0x01 << 9)
+#define PROMISCUOUS		(0x01 << 8)
+#define MULTICAST		(0x01 << 7)
+#define BROADCAST		(0x01 << 6)
+#define HASH			(0x01 << 5)
+#define APPEND_PAD		(0x01 << 4)
+#define APPEND_CRC		(0x01 << 3)
+#define TRANSMITTER_ACTION	(0x01 << 2)
+#define RECEIVER_ACTION		(0x01 << 1)
+#define DMA_ACTION		(0x01 << 0)
+
+/* Status register bits
+ *
+ * Note: bits 7-15 are reserved
+ */
+#define ALIGNMENT		(0x01 << 6)
+#define FIFO_OVER_RUN		(0x01 << 5)
+#define FIFO_UNDER_RUN		(0x01 << 4)
+#define RX_ERROR		(0x01 << 3)
+#define RX_COMPLETE		(0x01 << 2)
+#define TX_ERROR		(0x01 << 1)
+#define TX_COMPLETE		(0x01 << 0)
+
+/* FIFO depth register bits
+ *
+ * Note: bits 6 and 14 are reserved
+ */
+
+#define ETH_TXBD		(0x01 << 15)
+#define ETN_TX_FIFO_DEPTH	(0x01 << 8)
+#define ETH_RXBD		(0x01 << 7)
+#define ETH_RX_FIFO_DEPTH	(0x01 << 0)
+
+static int control_read(struct usbnet *dev,
+			unsigned char request, unsigned short value,
+			unsigned short index, void *data, unsigned short size,
+			int timeout)
+{
+	unsigned char *buf = NULL;
+	unsigned char request_type;
+	int err = 0;
+
+	if (request == REQUEST_READ)
+		request_type = (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER);
+	else
+		request_type = (USB_DIR_IN | USB_TYPE_VENDOR |
+				USB_RECIP_DEVICE);
+
+	netdev_dbg(dev->net, "Control_read() index=0x%02x size=%d\n",
+		   index, size);
+
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	err = usb_control_msg(dev->udev,
+			      usb_rcvctrlpipe(dev->udev, 0),
+			      request, request_type, value, index, buf, size,
+			      timeout);
+	if (err == size)
+		memcpy(data, buf, size);
+	else if (err >= 0)
+		err = -EINVAL;
+	kfree(buf);
+
+	return err;
+
+err_out:
+	return err;
+}
+
+static int control_write(struct usbnet *dev, unsigned char request,
+			 unsigned short value, unsigned short index,
+			 void *data, unsigned short size, int timeout)
+{
+	unsigned char *buf = NULL;
+	unsigned char request_type;
+	int err = 0;
+
+	if (request == REQUEST_WRITE)
+		request_type = (USB_DIR_OUT | USB_TYPE_VENDOR |
+				USB_RECIP_OTHER);
+	else
+		request_type = (USB_DIR_OUT | USB_TYPE_VENDOR |
+				USB_RECIP_DEVICE);
+
+	netdev_dbg(dev->net, "Control_write() index=0x%02x size=%d\n",
+		   index, size);
+
+	if (data) {
+		buf = kmalloc(size, GFP_KERNEL);
+		if (!buf) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+		memcpy(buf, data, size);
+	}
+
+	err = usb_control_msg(dev->udev,
+			      usb_sndctrlpipe(dev->udev, 0),
+			      request, request_type, value, index, buf, size,
+			      timeout);
+	if (err >= 0 && err < size)
+		err = -EINVAL;
+	kfree(buf);
+
+	return err;
+
+err_out:
+	return err;
+}
+
+static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
+{
+	struct usbnet *dev = netdev_priv(netdev);
+	unsigned char buff[2];
+
+	netdev_dbg(netdev, "ch9200_mdio_read phy_id:%02x loc:%02x\n",
+		   phy_id, loc);
+
+	if (phy_id != 0)
+		return -ENODEV;
+
+	control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
+		     CONTROL_TIMEOUT_MS);
+
+	return (buff[0] | buff[1] << 8);
+}
+
+static void ch9200_mdio_write(struct net_device *netdev,
+			      int phy_id, int loc, int val)
+{
+	struct usbnet *dev = netdev_priv(netdev);
+	unsigned char buff[2];
+
+	netdev_dbg(netdev, "ch9200_mdio_write() phy_id=%02x loc:%02x\n",
+		   phy_id, loc);
+
+	if (phy_id != 0)
+		return;
+
+	buff[0] = (unsigned char)val;
+	buff[1] = (unsigned char)(val >> 8);
+
+	control_write(dev, REQUEST_WRITE, 0, loc * 2, buff, 0x02,
+		      CONTROL_TIMEOUT_MS);
+}
+
+static int ch9200_link_reset(struct usbnet *dev)
+{
+	struct ethtool_cmd ecmd;
+
+	mii_check_media(&dev->mii, 1, 1);
+	mii_ethtool_gset(&dev->mii, &ecmd);
+
+	netdev_dbg(dev->net, "link_reset() speed:%d duplex:%d\n",
+		   ecmd.speed, ecmd.duplex);
+
+	return 0;
+}
+
+static void ch9200_status(struct usbnet *dev, struct urb *urb)
+{
+	int link;
+	unsigned char *buf;
+
+	if (urb->actual_length < 16)
+		return;
+
+	buf = urb->transfer_buffer;
+	link = !!(buf[0] & 0x01);
+
+	if (link) {
+		netif_carrier_on(dev->net);
+		usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+	} else {
+		netif_carrier_off(dev->net);
+	}
+}
+
+static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+				       gfp_t flags)
+{
+	int i = 0;
+	int len = 0;
+	int tx_overhead = 0;
+
+	tx_overhead = 0x40;
+
+	len = skb->len;
+	if (skb_headroom(skb) < tx_overhead) {
+		struct sk_buff *skb2;
+
+		skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
+		dev_kfree_skb_any(skb);
+		skb = skb2;
+		if (!skb)
+			return NULL;
+	}
+
+	__skb_push(skb, tx_overhead);
+	/* usbnet adds padding if the length is a multiple of the USB packet
+	 * size; if so, adjust the length value in the header.
+	 */
+	if ((skb->len % dev->maxpacket) == 0)
+		len++;
+
+	skb->data[0] = len;
+	skb->data[1] = len >> 8;
+	skb->data[2] = 0x00;
+	skb->data[3] = 0x80;
+
+	for (i = 4; i < 48; i++)
+		skb->data[i] = 0x00;
+
+	skb->data[48] = len;
+	skb->data[49] = len >> 8;
+	skb->data[50] = 0x00;
+	skb->data[51] = 0x80;
+
+	for (i = 52; i < 64; i++)
+		skb->data[i] = 0x00;
+
+	return skb;
+}
+
+static int ch9200_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+	int len = 0;
+	int rx_overhead = 0;
+
+	rx_overhead = 64;
+
+	if (unlikely(skb->len < rx_overhead)) {
+		dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
+		return 0;
+	}
+
+	len = (skb->data[skb->len - 16] | skb->data[skb->len - 15] << 8);
+	skb_trim(skb, len);
+
+	return 1;
+}
+
+static int get_mac_address(struct usbnet *dev, unsigned char *data)
+{
+	int err = 0;
+	unsigned char mac_addr[0x06];
+	int rd_mac_len = 0;
+
+	netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n",
+		   dev->udev->descriptor.idVendor,
+		   dev->udev->descriptor.idProduct);
+
+	memset(mac_addr, 0, sizeof(mac_addr));
+	rd_mac_len = control_read(dev, REQUEST_READ, 0,
+				  MAC_REG_STATION_L, mac_addr, 0x02,
+				  CONTROL_TIMEOUT_MS);
+	rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_M,
+				   mac_addr + 2, 0x02, CONTROL_TIMEOUT_MS);
+	rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_H,
+				   mac_addr + 4, 0x02, CONTROL_TIMEOUT_MS);
+	if (rd_mac_len != ETH_ALEN)
+		err = -EINVAL;
+
+	data[0] = mac_addr[5];
+	data[1] = mac_addr[4];
+	data[2] = mac_addr[3];
+	data[3] = mac_addr[2];
+	data[4] = mac_addr[1];
+	data[5] = mac_addr[0];
+
+	return err;
+}
+
+static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+	int retval = 0;
+	unsigned char data[2];
+
+	retval = usbnet_get_endpoints(dev, intf);
+	if (retval)
+		return retval;
+
+	dev->mii.dev = dev->net;
+	dev->mii.mdio_read = ch9200_mdio_read;
+	dev->mii.mdio_write = ch9200_mdio_write;
+	dev->mii.reg_num_mask = 0x1f;
+
+	dev->mii.phy_id_mask = 0x1f;
+
+	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+	dev->rx_urb_size = 24 * 64 + 16;
+	mii_nway_restart(&dev->mii);
+
+	data[0] = 0x01;
+	data[1] = 0x0F;
+	retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_THRESHOLD, data,
+			       0x02, CONTROL_TIMEOUT_MS);
+
+	data[0] = 0xA0;
+	data[1] = 0x90;
+	retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FIFO_DEPTH, data,
+			       0x02, CONTROL_TIMEOUT_MS);
+
+	data[0] = 0x30;
+	data[1] = 0x00;
+	retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_PAUSE, data,
+			       0x02, CONTROL_TIMEOUT_MS);
+
+	data[0] = 0x17;
+	data[1] = 0xD8;
+	retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FLOW_CONTROL,
+			       data, 0x02, CONTROL_TIMEOUT_MS);
+
+	/* Undocumented register */
+	data[0] = 0x01;
+	data[1] = 0x00;
+	retval = control_write(dev, REQUEST_WRITE, 0, 254, data, 0x02,
+			       CONTROL_TIMEOUT_MS);
+
+	data[0] = 0x5F;
+	data[1] = 0x0D;
+	retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02,
+			       CONTROL_TIMEOUT_MS);
+
+	retval = get_mac_address(dev, dev->net->dev_addr);
+
+	return retval;
+}
+
+static const struct driver_info ch9200_info = {
+	.description = "CH9200 USB to Network Adaptor",
+	.flags = FLAG_ETHER,
+	.bind = ch9200_bind,
+	.rx_fixup = ch9200_rx_fixup,
+	.tx_fixup = ch9200_tx_fixup,
+	.status = ch9200_status,
+	.link_reset = ch9200_link_reset,
+	.reset = ch9200_link_reset,
+};
+
+static const struct usb_device_id ch9200_products[] = {
+	{
+	 USB_DEVICE(0x1A86, 0xE092),
+	 .driver_info = (unsigned long)&ch9200_info,
+	 },
+	{},
+};
+
+MODULE_DEVICE_TABLE(usb, ch9200_products);
+
+static struct usb_driver ch9200_driver = {
+	.name = "ch9200",
+	.id_table = ch9200_products,
+	.probe = usbnet_probe,
+	.disconnect = usbnet_disconnect,
+	.suspend = usbnet_suspend,
+	.resume = usbnet_resume,
+};
+
+module_usb_driver(ch9200_driver);
+
+MODULE_DESCRIPTION("QinHeng CH9200 USB Network device");
+MODULE_LICENSE("GPL");
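The ch9200 TX path above prepends a 64-byte header built byte-by-byte in ch9200_tx_fixup(): the 16-bit frame length (little-endian) at offsets 0-1 and again at 48-49, a 0x80 marker byte at offsets 3 and 51, and zeroes elsewhere; the stored length is bumped by one when the padded transfer would land exactly on a USB packet boundary. A standalone sketch of that layout, inferred only from the fixup code and not from any hardware documentation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CH9200_TX_OVERHEAD 64	/* header size used by ch9200_tx_fixup above */

/* Userspace sketch of the TX header layout as built by ch9200_tx_fixup(). */
static void ch9200_build_header(uint8_t *hdr, uint16_t len)
{
	memset(hdr, 0, CH9200_TX_OVERHEAD);
	hdr[0] = len & 0xff;
	hdr[1] = len >> 8;
	hdr[3] = 0x80;
	hdr[48] = len & 0xff;
	hdr[49] = len >> 8;
	hdr[51] = 0x80;
}

int main(void)
{
	uint8_t hdr[CH9200_TX_OVERHEAD];

	ch9200_build_header(hdr, 1514);
	printf("%02x %02x .. %02x | %02x %02x .. %02x\n",
	       hdr[0], hdr[1], hdr[3], hdr[48], hdr[49], hdr[51]);
	return 0;
}
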
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 6a70a72..226668e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2424,11 +2424,6 @@
 			skb_pull(skb, align_count);
 	}
 
-	if (unlikely(skb->len < 0)) {
-		netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
-		return 0;
-	}
-
 	return 1;
 }
 
@@ -3209,7 +3204,6 @@
 	int ret;
 	int event;
 
-	ret = 0;
 	event = message.event;
 
 	if (!dev->suspend_count++) {
@@ -3291,6 +3285,7 @@
 		}
 	}
 
+	ret = 0;
 out:
 	return ret;
 }
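The checks removed here, and in the smsc75xx/smsc95xx hunks below, were dead code: sk_buff::len is an unsigned int, so skb->len < 0 can never be true. A trivial userspace illustration:

#include <stdio.h>

int main(void)
{
	unsigned int len = 0;

	/* An unsigned value is never less than zero, so the removed checks
	 * could not fire; most compilers flag such comparisons as
	 * always-false.
	 */
	len = len - 1;			/* wraps to UINT_MAX, still not < 0 */
	printf("%d\n", len < 0);	/* prints 0 */
	return 0;
}
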
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index d9e7892..30033db 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -2185,11 +2185,6 @@
 			skb_pull(skb, align_count);
 	}
 
-	if (unlikely(skb->len < 0)) {
-		netdev_warn(dev->net, "invalid rx length<0 %d\n", skb->len);
-		return 0;
-	}
-
 	return 1;
 }
 
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 26423ad..66b3ab9 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1815,11 +1815,6 @@
 			skb_pull(skb, align_count);
 	}
 
-	if (unlikely(skb->len < 0)) {
-		netdev_warn(dev->net, "invalid rx length<0 %d\n", skb->len);
-		return 0;
-	}
-
 	return 1;
 }
 
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index c1d0e7a..a681569 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -183,16 +183,22 @@
 }
 
 
-/* Should be multiple of 4 */
-#define NUM_TX_REGS	8
-#define NUM_RX_REGS	12
-
+/* This is version 2 of the vmxnet3 ethtool_regs format, which goes hand in
+ * hand with version 2 of the vmxnet3 support for ethtool(8) --register-dump.
+ * Therefore, if any registers are added, removed or modified, a version bump
+ * and a corresponding change in the vmxnet3 support for ethtool(8)
+ * --register-dump would be required.
+ */
 static int
 vmxnet3_get_regs_len(struct net_device *netdev)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-	return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
-		adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
+
+	return ((9 /* BAR1 registers */ +
+		(1 + adapter->intr.num_intrs) +
+		(1 + adapter->num_tx_queues * 17 /* Tx queue registers */) +
+		(1 + adapter->num_rx_queues * 23 /* Rx queue registers */)) *
+		sizeof(u32));
 }
 
 
@@ -342,6 +348,12 @@
 }
 
 
+/* This is version 2 of the vmxnet3 ethtool_regs format, which goes hand in
+ * hand with version 2 of the vmxnet3 support for ethtool(8) --register-dump.
+ * Therefore, if any registers are added, removed or modified, a version bump
+ * and a corresponding change in the vmxnet3 support for ethtool(8)
+ * --register-dump would be required.
+ */
 static void
 vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
 {
@@ -351,40 +363,90 @@
 
 	memset(p, 0, vmxnet3_get_regs_len(netdev));
 
-	regs->version = 1;
+	regs->version = 2;
 
 	/* Update vmxnet3_get_regs_len if we want to dump more registers */
 
-	/* make each ring use multiple of 16 bytes */
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAL);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAH);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ECR);
+
+	buf[j++] = adapter->intr.num_intrs;
+	for (i = 0; i < adapter->intr.num_intrs; i++) {
+		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_IMR
+						 + i * VMXNET3_REG_ALIGN);
+	}
+
+	buf[j++] = adapter->num_tx_queues;
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
-		buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
-		buf[j++] = adapter->tx_queue[i].tx_ring.gen;
-		buf[j++] = 0;
+		struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
 
-		buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
-		buf[j++] = adapter->tx_queue[i].comp_ring.gen;
-		buf[j++] = adapter->tx_queue[i].stopped;
-		buf[j++] = 0;
+		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_TXPROD +
+						 i * VMXNET3_REG_ALIGN);
+
+		buf[j++] = VMXNET3_GET_ADDR_LO(tq->tx_ring.basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(tq->tx_ring.basePA);
+		buf[j++] = tq->tx_ring.size;
+		buf[j++] = tq->tx_ring.next2fill;
+		buf[j++] = tq->tx_ring.next2comp;
+		buf[j++] = tq->tx_ring.gen;
+
+		buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA);
+		buf[j++] = tq->data_ring.size;
+		/* transmit data ring buffer size */
+		buf[j++] = VMXNET3_HDR_COPY_SIZE;
+
+		buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA);
+		buf[j++] = tq->comp_ring.size;
+		buf[j++] = tq->comp_ring.next2proc;
+		buf[j++] = tq->comp_ring.gen;
+
+		buf[j++] = tq->stopped;
 	}
 
+	buf[j++] = adapter->num_rx_queues;
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
-		buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
-		buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
+		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+
+		buf[j++] =  VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_RXPROD +
+						  i * VMXNET3_REG_ALIGN);
+		buf[j++] =  VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_RXPROD2 +
+						  i * VMXNET3_REG_ALIGN);
+
+		buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA);
+		buf[j++] = rq->rx_ring[0].size;
+		buf[j++] = rq->rx_ring[0].next2fill;
+		buf[j++] = rq->rx_ring[0].next2comp;
+		buf[j++] = rq->rx_ring[0].gen;
+
+		buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[1].basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[1].basePA);
+		buf[j++] = rq->rx_ring[1].size;
+		buf[j++] = rq->rx_ring[1].next2fill;
+		buf[j++] = rq->rx_ring[1].next2comp;
+		buf[j++] = rq->rx_ring[1].gen;
+
+		/* receive data ring */
+		buf[j++] = 0;
+		buf[j++] = 0;
+		buf[j++] = 0;
 		buf[j++] = 0;
 
-		buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
-		buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
-		buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
-		buf[j++] = 0;
-
-		buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
-		buf[j++] = adapter->rx_queue[i].comp_ring.gen;
-		buf[j++] = 0;
-		buf[j++] = 0;
+		buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
+		buf[j++] = rq->comp_ring.size;
+		buf[j++] = rq->comp_ring.next2proc;
+		buf[j++] = rq->comp_ring.gen;
 	}
-
 }
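With the version 2 layout above, the dump emitted by vmxnet3_get_regs() is self-describing: 9 BAR1 registers, then a count followed by one IMR value per interrupt vector, then a count followed by 17 words per Tx queue, then a count followed by 23 words per Rx queue. A standalone sketch that recomputes vmxnet3_get_regs_len() for an example configuration; the queue and interrupt counts are made-up inputs:

#include <stdint.h>
#include <stdio.h>

static unsigned int regs_len(unsigned int num_intrs,
			     unsigned int num_tx_queues,
			     unsigned int num_rx_queues)
{
	return (9 +				/* BAR1 registers */
		(1 + num_intrs) +		/* count + one IMR per vector */
		(1 + num_tx_queues * 17) +	/* count + Tx queue registers */
		(1 + num_rx_queues * 23)) *	/* count + Rx queue registers */
	       sizeof(uint32_t);		/* every entry is a u32 */
}

int main(void)
{
	/* e.g. one Tx queue, one Rx queue, three interrupt vectors */
	printf("%u bytes\n", regs_len(3, 1, 1));	/* (9+4+18+24)*4 = 220 */
	return 0;
}
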
 
 
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 2652245..3f859a5 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.2.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.3.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040200
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040300
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
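VMXNET3_DRIVER_VERSION_NUM packs one version component per byte, so the bump above encodes 1.4.3.0, matching the new version string. A quick standalone decode:

#include <stdint.h>
#include <stdio.h>

#define VMXNET3_DRIVER_VERSION_NUM 0x01040300	/* copied from the hunk above */

int main(void)
{
	uint32_t v = VMXNET3_DRIVER_VERSION_NUM;

	/* Each byte encodes one component of the version string. */
	printf("%u.%u.%u.%u\n",
	       (v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
	return 0;
}
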
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 637e9fd..191579a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -34,16 +34,32 @@
 #include <net/rtnetlink.h>
 #include <net/route.h>
 #include <net/addrconf.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
+
+#define RT_FL_TOS(oldflp4) \
+	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
 
 #define DRV_NAME	"vrf"
 #define DRV_VERSION	"1.0"
 
-#define vrf_is_slave(dev)   ((dev)->flags & IFF_SLAVE)
-
 #define vrf_master_get_rcu(dev) \
 	((struct net_device *)rcu_dereference(dev->rx_handler_data))
 
+struct slave {
+	struct list_head        list;
+	struct net_device       *dev;
+};
+
+struct slave_queue {
+	struct list_head        all_slaves;
+};
+
+struct net_vrf {
+	struct slave_queue      queue;
+	struct rtable           *rth;
+	u32                     tb_id;
+};
+
 struct pcpu_dstats {
 	u64			tx_pkts;
 	u64			tx_bytes;
@@ -58,9 +74,9 @@
 	return dst;
 }
 
-static int vrf_ip_local_out(struct sk_buff *skb)
+static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	return ip_local_out(skb);
+	return ip_local_out(net, sk, skb);
 }
 
 static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
@@ -193,7 +209,8 @@
 		.flowi4_oif = vrf_dev->ifindex,
 		.flowi4_iif = LOOPBACK_IFINDEX,
 		.flowi4_tos = RT_TOS(ip4h->tos),
-		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC,
+		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
+				FLOWI_FLAG_SKIP_NH_OIF,
 		.daddr = ip4h->daddr,
 	};
 
@@ -205,7 +222,7 @@
 					       RT_SCOPE_LINK);
 	}
 
-	ret = ip_local_out(skb);
+	ret = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
 	if (unlikely(net_xmit_eval(ret)))
 		vrf_dev->stats.tx_errors++;
 	else
@@ -295,10 +312,9 @@
 	return ret;
 }
 
-static int vrf_output(struct sock *sk, struct sk_buff *skb)
+static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct net_device *dev = skb_dst(skb)->dev;
-	struct net *net = dev_net(dev);
 
 	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
 
@@ -394,18 +410,15 @@
 
 static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
 {
-	struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL);
 	struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
 	struct net_vrf *vrf = netdev_priv(dev);
 	struct slave_queue *queue = &vrf->queue;
 	int ret = -ENOMEM;
 
-	if (!slave || !vrf_ptr)
+	if (!slave)
 		goto out_fail;
 
 	slave->dev = port_dev;
-	vrf_ptr->ifindex = dev->ifindex;
-	vrf_ptr->tb_id = vrf->tb_id;
 
 	/* register the packet handler for slave ports */
 	ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
@@ -420,9 +433,8 @@
 	if (ret < 0)
 		goto out_unregister;
 
-	port_dev->flags |= IFF_SLAVE;
+	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
 	__vrf_insert_slave(queue, slave);
-	rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr);
 	cycle_netdev(port_dev);
 
 	return 0;
@@ -430,14 +442,13 @@
 out_unregister:
 	netdev_rx_handler_unregister(port_dev);
 out_fail:
-	kfree(vrf_ptr);
 	kfree(slave);
 	return ret;
 }
 
 static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
 {
-	if (netif_is_vrf(port_dev) || vrf_is_slave(port_dev))
+	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
 		return -EINVAL;
 
 	return do_vrf_add_slave(dev, port_dev);
@@ -446,21 +457,15 @@
 /* inverse of do_vrf_add_slave */
 static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
 {
-	struct net_vrf_dev *vrf_ptr = rtnl_dereference(port_dev->vrf_ptr);
 	struct net_vrf *vrf = netdev_priv(dev);
 	struct slave_queue *queue = &vrf->queue;
 	struct slave *slave;
 
-	RCU_INIT_POINTER(port_dev->vrf_ptr, NULL);
-
 	netdev_upper_dev_unlink(port_dev, dev);
-	port_dev->flags &= ~IFF_SLAVE;
+	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
 
 	netdev_rx_handler_unregister(port_dev);
 
-	/* after netdev_rx_handler_unregister for synchronize_rcu */
-	kfree(vrf_ptr);
-
 	cycle_netdev(port_dev);
 
 	slave = __vrf_find_slave_dev(queue, port_dev);
@@ -528,6 +533,65 @@
 	.ndo_del_slave		= vrf_del_slave,
 };
 
+static u32 vrf_fib_table(const struct net_device *dev)
+{
+	struct net_vrf *vrf = netdev_priv(dev);
+
+	return vrf->tb_id;
+}
+
+static struct rtable *vrf_get_rtable(const struct net_device *dev,
+				     const struct flowi4 *fl4)
+{
+	struct rtable *rth = NULL;
+
+	if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
+		struct net_vrf *vrf = netdev_priv(dev);
+
+		rth = vrf->rth;
+		atomic_inc(&rth->dst.__refcnt);
+	}
+
+	return rth;
+}
+
+/* called under rcu_read_lock */
+static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
+{
+	struct fib_result res = { .tclassid = 0 };
+	struct net *net = dev_net(dev);
+	u32 orig_tos = fl4->flowi4_tos;
+	u8 flags = fl4->flowi4_flags;
+	u8 scope = fl4->flowi4_scope;
+	u8 tos = RT_FL_TOS(fl4);
+
+	if (unlikely(!fl4->daddr))
+		return;
+
+	fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
+	fl4->flowi4_iif = LOOPBACK_IFINDEX;
+	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
+	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
+			     RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
+
+	if (!fib_lookup(net, fl4, &res, 0)) {
+		if (res.type == RTN_LOCAL)
+			fl4->saddr = res.fi->fib_prefsrc ? : fl4->daddr;
+		else
+			fib_select_path(net, &res, fl4, -1);
+	}
+
+	fl4->flowi4_flags = flags;
+	fl4->flowi4_tos = orig_tos;
+	fl4->flowi4_scope = scope;
+}
+
+static const struct l3mdev_ops vrf_l3mdev_ops = {
+	.l3mdev_fib_table	= vrf_fib_table,
+	.l3mdev_get_rtable	= vrf_get_rtable,
+	.l3mdev_get_saddr	= vrf_get_saddr,
+};
+
 static void vrf_get_drvinfo(struct net_device *dev,
 			    struct ethtool_drvinfo *info)
 {
@@ -545,6 +609,7 @@
 
 	/* Initialize the device structure. */
 	dev->netdev_ops = &vrf_netdev_ops;
+	dev->l3mdev_ops = &vrf_l3mdev_ops;
 	dev->ethtool_ops = &vrf_ethtool_ops;
 	dev->destructor = free_netdev;
 
@@ -571,10 +636,6 @@
 
 static void vrf_dellink(struct net_device *dev, struct list_head *head)
 {
-	struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
-
-	RCU_INIT_POINTER(dev->vrf_ptr, NULL);
-	kfree_rcu(vrf_ptr, rcu);
 	unregister_netdevice_queue(dev, head);
 }
 
@@ -582,7 +643,6 @@
 		       struct nlattr *tb[], struct nlattr *data[])
 {
 	struct net_vrf *vrf = netdev_priv(dev);
-	struct net_vrf_dev *vrf_ptr;
 	int err;
 
 	if (!data || !data[IFLA_VRF_TABLE])
@@ -590,26 +650,15 @@
 
 	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
 
-	dev->priv_flags |= IFF_VRF_MASTER;
-
-	err = -ENOMEM;
-	vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL);
-	if (!vrf_ptr)
-		goto out_fail;
-
-	vrf_ptr->ifindex = dev->ifindex;
-	vrf_ptr->tb_id = vrf->tb_id;
+	dev->priv_flags |= IFF_L3MDEV_MASTER;
 
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto out_fail;
 
-	rcu_assign_pointer(dev->vrf_ptr, vrf_ptr);
-
 	return 0;
 
 out_fail:
-	kfree(vrf_ptr);
 	free_netdev(dev);
 	return err;
 }
@@ -653,10 +702,9 @@
 
 	/* only care about unregister events to drop slave references */
 	if (event == NETDEV_UNREGISTER) {
-		struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
 		struct net_device *vrf_dev;
 
-		if (!vrf_ptr || netif_is_vrf(dev))
+		if (!netif_is_l3_slave(dev))
 			goto out;
 
 		vrf_dev = netdev_master_upper_dev_get(dev);
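With dev->l3mdev_ops wired up above, code that used to consult the removed dev->vrf_ptr can reach the VRF's routing table through the ops instead. A rough kernel-context sketch of such a lookup; the helper name is made up for illustration and the fallback choice is only an assumption:

/* Hypothetical helper, kernel context only; mirrors how vrf_fib_table() is
 * reached through the l3mdev_ops installed in vrf_setup() above.
 */
static u32 example_fib_table_of(const struct net_device *dev)
{
	if (netif_is_l3_master(dev) && dev->l3mdev_ops &&
	    dev->l3mdev_ops->l3mdev_fib_table)
		return dev->l3mdev_ops->l3mdev_fib_table(dev);

	return RT_TABLE_MAIN;	/* assumed fallback for non-VRF devices */
}
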
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index cf8b7f0..ce704df 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -75,8 +75,7 @@
 
 static const u8 all_zeros_mac[ETH_ALEN];
 
-static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
-					 bool no_share, u32 flags);
+static int vxlan_sock_add(struct vxlan_dev *vxlan);
 
 /* per-network namespace private data for this module */
 struct vxlan_net {
@@ -994,19 +993,30 @@
 static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
 {
 	struct vxlan_dev *vxlan;
+	unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
 
 	/* The vxlan_sock is only used by dev, leaving group has
 	 * no effect on other vxlan devices.
 	 */
-	if (atomic_read(&dev->vn_sock->refcnt) == 1)
+	if (family == AF_INET && dev->vn4_sock &&
+	    atomic_read(&dev->vn4_sock->refcnt) == 1)
 		return false;
+#if IS_ENABLED(CONFIG_IPV6)
+	if (family == AF_INET6 && dev->vn6_sock &&
+	    atomic_read(&dev->vn6_sock->refcnt) == 1)
+		return false;
+#endif
 
 	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
 		if (!netif_running(vxlan->dev) || vxlan == dev)
 			continue;
 
-		if (vxlan->vn_sock != dev->vn_sock)
+		if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock)
 			continue;
+#if IS_ENABLED(CONFIG_IPV6)
+		if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock)
+			continue;
+#endif
 
 		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
 				      &dev->default_dst.remote_ip))
@@ -1022,15 +1032,16 @@
 	return false;
 }
 
-static void vxlan_sock_release(struct vxlan_sock *vs)
+static void __vxlan_sock_release(struct vxlan_sock *vs)
 {
-	struct sock *sk = vs->sock->sk;
-	struct net *net = sock_net(sk);
-	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+	struct vxlan_net *vn;
 
+	if (!vs)
+		return;
 	if (!atomic_dec_and_test(&vs->refcnt))
 		return;
 
+	vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
 	spin_lock(&vn->sock_lock);
 	hlist_del_rcu(&vs->hlist);
 	vxlan_notify_del_rx_port(vs);
@@ -1039,32 +1050,43 @@
 	queue_work(vxlan_wq, &vs->del_work);
 }
 
+static void vxlan_sock_release(struct vxlan_dev *vxlan)
+{
+	__vxlan_sock_release(vxlan->vn4_sock);
+#if IS_ENABLED(CONFIG_IPV6)
+	__vxlan_sock_release(vxlan->vn6_sock);
+#endif
+}
+
 /* Update multicast group membership when first VNI on
  * multicast address is brought up
  */
 static int vxlan_igmp_join(struct vxlan_dev *vxlan)
 {
-	struct vxlan_sock *vs = vxlan->vn_sock;
-	struct sock *sk = vs->sock->sk;
+	struct sock *sk;
 	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
 	int ifindex = vxlan->default_dst.remote_ifindex;
 	int ret = -EINVAL;
 
-	lock_sock(sk);
 	if (ip->sa.sa_family == AF_INET) {
 		struct ip_mreqn mreq = {
 			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
 			.imr_ifindex		= ifindex,
 		};
 
+		sk = vxlan->vn4_sock->sock->sk;
+		lock_sock(sk);
 		ret = ip_mc_join_group(sk, &mreq);
+		release_sock(sk);
 #if IS_ENABLED(CONFIG_IPV6)
 	} else {
+		sk = vxlan->vn6_sock->sock->sk;
+		lock_sock(sk);
 		ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
 						   &ip->sin6.sin6_addr);
+		release_sock(sk);
 #endif
 	}
-	release_sock(sk);
 
 	return ret;
 }
@@ -1072,27 +1094,30 @@
 /* Inverse of vxlan_igmp_join when last VNI is brought down */
 static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
 {
-	struct vxlan_sock *vs = vxlan->vn_sock;
-	struct sock *sk = vs->sock->sk;
+	struct sock *sk;
 	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
 	int ifindex = vxlan->default_dst.remote_ifindex;
 	int ret = -EINVAL;
 
-	lock_sock(sk);
 	if (ip->sa.sa_family == AF_INET) {
 		struct ip_mreqn mreq = {
 			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
 			.imr_ifindex		= ifindex,
 		};
 
+		sk = vxlan->vn4_sock->sock->sk;
+		lock_sock(sk);
 		ret = ip_mc_leave_group(sk, &mreq);
+		release_sock(sk);
 #if IS_ENABLED(CONFIG_IPV6)
 	} else {
+		sk = vxlan->vn6_sock->sock->sk;
+		lock_sock(sk);
 		ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
 						   &ip->sin6.sin6_addr);
+		release_sock(sk);
 #endif
 	}
-	release_sock(sk);
 
 	return ret;
 }
@@ -1873,8 +1898,7 @@
 {
 	struct ip_tunnel_info *info;
 	struct vxlan_dev *vxlan = netdev_priv(dev);
-	struct sock *sk = vxlan->vn_sock->sock->sk;
-	unsigned short family = vxlan_get_sk_family(vxlan->vn_sock);
+	struct sock *sk;
 	struct rtable *rt = NULL;
 	const struct iphdr *old_iph;
 	struct flowi4 fl4;
@@ -1901,13 +1925,10 @@
 				  dev->name);
 			goto drop;
 		}
-		if (family != ip_tunnel_info_af(info))
-			goto drop;
-
 		dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
 		vni = be64_to_cpu(info->key.tun_id);
-		remote_ip.sa.sa_family = family;
-		if (family == AF_INET)
+		remote_ip.sa.sa_family = ip_tunnel_info_af(info);
+		if (remote_ip.sa.sa_family == AF_INET)
 			remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
 		else
 			remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
@@ -1952,6 +1973,10 @@
 	}
 
 	if (dst->sa.sa_family == AF_INET) {
+		if (!vxlan->vn4_sock)
+			goto drop;
+		sk = vxlan->vn4_sock->sock->sk;
+
 		if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
 			df = htons(IP_DF);
 
@@ -2013,6 +2038,10 @@
 		struct flowi6 fl6;
 		u32 rt6i_flags;
 
+		if (!vxlan->vn6_sock)
+			goto drop;
+		sk = vxlan->vn6_sock->sock->sk;
+
 		memset(&fl6, 0, sizeof(fl6));
 		fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
 		fl6.daddr = dst->sin6.sin6_addr;
@@ -2204,7 +2233,6 @@
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 	__u32 vni = vxlan->default_dst.remote_vni;
 
-	vxlan->vn_sock = vs;
 	spin_lock(&vn->sock_lock);
 	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
 	spin_unlock(&vn->sock_lock);
@@ -2244,22 +2272,18 @@
 static int vxlan_open(struct net_device *dev)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
-	struct vxlan_sock *vs;
-	int ret = 0;
+	int ret;
 
-	vs = vxlan_sock_add(vxlan->net, vxlan->cfg.dst_port,
-			    vxlan->cfg.no_share, vxlan->flags);
-	if (IS_ERR(vs))
-		return PTR_ERR(vs);
-
-	vxlan_vs_add_dev(vs, vxlan);
+	ret = vxlan_sock_add(vxlan);
+	if (ret < 0)
+		return ret;
 
 	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
 		ret = vxlan_igmp_join(vxlan);
 		if (ret == -EADDRINUSE)
 			ret = 0;
 		if (ret) {
-			vxlan_sock_release(vs);
+			vxlan_sock_release(vxlan);
 			return ret;
 		}
 	}
@@ -2294,7 +2318,6 @@
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
-	struct vxlan_sock *vs = vxlan->vn_sock;
 	int ret = 0;
 
 	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
@@ -2304,7 +2327,7 @@
 	del_timer_sync(&vxlan->age_timer);
 
 	vxlan_flush(vxlan);
-	vxlan_sock_release(vs);
+	vxlan_sock_release(vxlan);
 
 	return ret;
 }
@@ -2392,10 +2415,6 @@
 
 	eth_hw_addr_random(dev);
 	ether_setup(dev);
-	if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
-		dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
-	else
-		dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
 
 	dev->netdev_ops = &vxlan_netdev_ops;
 	dev->destructor = free_netdev;
@@ -2544,14 +2563,13 @@
 }
 
 /* Create new listen socket if needed */
-static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
-					      u32 flags)
+static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
+					      __be16 port, u32 flags)
 {
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 	struct vxlan_sock *vs;
 	struct socket *sock;
 	unsigned int h;
-	bool ipv6 = !!(flags & VXLAN_F_IPV6);
 	struct udp_tunnel_sock_cfg tunnel_cfg;
 
 	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
@@ -2596,27 +2614,53 @@
 	return vs;
 }
 
-static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
-					 bool no_share, u32 flags)
+static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
 {
-	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
-	struct vxlan_sock *vs;
-	bool ipv6 = flags & VXLAN_F_IPV6;
+	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+	struct vxlan_sock *vs = NULL;
 
-	if (!no_share) {
+	if (!vxlan->cfg.no_share) {
 		spin_lock(&vn->sock_lock);
-		vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
-				     flags);
-		if (vs) {
-			if (!atomic_add_unless(&vs->refcnt, 1, 0))
-				vs = ERR_PTR(-EBUSY);
+		vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
+				     vxlan->cfg.dst_port, vxlan->flags);
+		if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
 			spin_unlock(&vn->sock_lock);
-			return vs;
+			return -EBUSY;
 		}
 		spin_unlock(&vn->sock_lock);
 	}
+	if (!vs)
+		vs = vxlan_socket_create(vxlan->net, ipv6,
+					 vxlan->cfg.dst_port, vxlan->flags);
+	if (IS_ERR(vs))
+		return PTR_ERR(vs);
+#if IS_ENABLED(CONFIG_IPV6)
+	if (ipv6)
+		vxlan->vn6_sock = vs;
+	else
+#endif
+		vxlan->vn4_sock = vs;
+	vxlan_vs_add_dev(vs, vxlan);
+	return 0;
+}
 
-	return vxlan_socket_create(net, port, flags);
+static int vxlan_sock_add(struct vxlan_dev *vxlan)
+{
+	bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
+	bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
+	int ret = 0;
+
+	vxlan->vn4_sock = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+	vxlan->vn6_sock = NULL;
+	if (ipv6 || metadata)
+		ret = __vxlan_sock_add(vxlan, true);
+#endif
+	if (!ret && (!ipv6 || metadata))
+		ret = __vxlan_sock_add(vxlan, false);
+	if (ret < 0)
+		vxlan_sock_release(vxlan);
+	return ret;
 }
 
 static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
@@ -2625,6 +2669,7 @@
 	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_rdst *dst = &vxlan->default_dst;
+	unsigned short needed_headroom = ETH_HLEN;
 	int err;
 	bool use_ipv6 = false;
 	__be16 default_port = vxlan->cfg.dst_port;
@@ -2640,8 +2685,12 @@
 		dst->remote_ip.sa.sa_family = AF_INET;
 
 	if (dst->remote_ip.sa.sa_family == AF_INET6 ||
-	    vxlan->cfg.saddr.sa.sa_family == AF_INET6)
+	    vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
+		if (!IS_ENABLED(CONFIG_IPV6))
+			return -EPFNOSUPPORT;
 		use_ipv6 = true;
+		vxlan->flags |= VXLAN_F_IPV6;
+	}
 
 	if (conf->remote_ifindex) {
 		struct net_device *lowerdev
@@ -2661,17 +2710,20 @@
 				pr_info("IPv6 is disabled via sysctl\n");
 				return -EPERM;
 			}
-			vxlan->flags |= VXLAN_F_IPV6;
 		}
 #endif
 
 		if (!conf->mtu)
 			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 
-		dev->needed_headroom = lowerdev->hard_header_len +
-				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
-	} else if (use_ipv6)
-		vxlan->flags |= VXLAN_F_IPV6;
+		needed_headroom = lowerdev->hard_header_len;
+	}
+
+	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
+		needed_headroom += VXLAN6_HEADROOM;
+	else
+		needed_headroom += VXLAN_HEADROOM;
+	dev->needed_headroom = needed_headroom;
 
 	memcpy(&vxlan->cfg, conf, sizeof(*conf));
 	if (!vxlan->cfg.dst_port)
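The per-device vn4_sock/vn6_sock pair above replaces the single vn_sock, and vxlan_sock_add() decides which of the two to open: a metadata-collecting device needs both address families, otherwise only the configured one. A standalone sketch of that selection logic, assuming an IPv6-capable kernel:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the decision made in vxlan_sock_add() above. */
static void which_sockets(bool ipv6, bool metadata)
{
	bool v6_sock = ipv6 || metadata;
	bool v4_sock = !ipv6 || metadata;

	printf("ipv6=%d metadata=%d -> v4_sock=%d v6_sock=%d\n",
	       ipv6, metadata, v4_sock, v6_sock);
}

int main(void)
{
	which_sockets(false, false);	/* classic IPv4 VXLAN: v4 only */
	which_sockets(true, false);	/* IPv6 VXLAN: v6 only */
	which_sockets(false, true);	/* collect-metadata: both sockets */
	return 0;
}
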
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index a63ab2e..f9f9422 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -214,8 +214,6 @@
 
 	  If you choose to build a module, it'll be called rndis_wlan.
 
-source "drivers/net/wireless/rtl818x/Kconfig"
-
 config ADM8211
 	tristate "ADMtek ADM8211 support"
 	depends on MAC80211 && PCI
@@ -243,6 +241,8 @@
 
 	  Thanks to Infineon-ADMtek for their support of this driver.
 
+source "drivers/net/wireless/realtek/rtl818x/Kconfig"
+
 config MAC80211_HWSIM
 	tristate "Simulated radio testing tool for mac80211"
 	depends on MAC80211
@@ -278,7 +278,8 @@
 source "drivers/net/wireless/p54/Kconfig"
 source "drivers/net/wireless/rt2x00/Kconfig"
 source "drivers/net/wireless/mediatek/Kconfig"
-source "drivers/net/wireless/rtlwifi/Kconfig"
+source "drivers/net/wireless/realtek/rtlwifi/Kconfig"
+source "drivers/net/wireless/realtek/rtl8xxxu/Kconfig"
 source "drivers/net/wireless/ti/Kconfig"
 source "drivers/net/wireless/zd1211rw/Kconfig"
 source "drivers/net/wireless/mwifiex/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 6b9e729..740fdd3 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -22,9 +22,7 @@
 obj-$(CONFIG_B43)		+= b43/
 obj-$(CONFIG_B43LEGACY)		+= b43legacy/
 obj-$(CONFIG_ZD1211RW)		+= zd1211rw/
-obj-$(CONFIG_RTL8180)		+= rtl818x/
-obj-$(CONFIG_RTL8187)		+= rtl818x/
-obj-$(CONFIG_RTLWIFI)		+= rtlwifi/
+obj-$(CONFIG_WLAN)		+= realtek/
 
 # 16-bit wireless PCMCIA client drivers
 obj-$(CONFIG_PCMCIA_RAYCS)	+= ray_cs.o
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index f00aafd..484c1a1 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -6212,7 +6212,7 @@
 			       struct ieee80211_vif *vif,
 			       enum ieee80211_ampdu_mlme_action action,
 			       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			       u8 buf_size)
+			       u8 buf_size, bool amsdu)
 {
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index 6314ae2..9d17a53 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -610,8 +610,8 @@
 #define AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ      -127
 #define AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ      -116
 
-#define AR_PHY_CCA_NOM_VAL_9287_2GHZ           -120
+#define AR_PHY_CCA_NOM_VAL_9287_2GHZ           -112
 #define AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ    -127
-#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ    -110
+#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ    -97
 
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 79fd3b2..8b238c1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -857,7 +857,7 @@
 			       qca956x_1p0_common_rx_gain_table);
 		INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
 			       qca956x_1p0_common_rx_gain_bounds);
-		INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+		INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
 			       qca956x_1p0_xlna_only);
 	} else if (AR_SREV_9580(ah))
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
@@ -942,7 +942,7 @@
 			       ar9462_2p1_baseband_core_mix_rxgain);
 		INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
 			       ar9462_2p1_baseband_postamble_mix_rxgain);
-		INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+		INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
 			       ar9462_2p1_baseband_postamble_5g_xlna);
 	} else if (AR_SREV_9462_20(ah)) {
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
@@ -951,7 +951,7 @@
 			       ar9462_2p0_baseband_core_mix_rxgain);
 		INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
 			       ar9462_2p0_baseband_postamble_mix_rxgain);
-		INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+		INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
 			       ar9462_2p0_baseband_postamble_5g_xlna);
 	}
 }
@@ -961,12 +961,12 @@
 	if (AR_SREV_9462_21(ah)) {
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
 			       ar9462_2p1_common_5g_xlna_only_rxgain);
-		INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+		INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
 			       ar9462_2p1_baseband_postamble_5g_xlna);
 	} else if (AR_SREV_9462_20(ah)) {
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
 			       ar9462_2p0_common_5g_xlna_only_rxgain);
-		INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+		INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
 			       ar9462_2p0_baseband_postamble_5g_xlna);
 	}
 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 1ad66b7..201425e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -926,19 +926,18 @@
 		 */
 		if ((ar9003_hw_get_rx_gain_idx(ah) == 2) ||
 		    (ar9003_hw_get_rx_gain_idx(ah) == 3)) {
-			REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+			REG_WRITE_ARRAY(&ah->ini_modes_rxgain_xlna,
 					modesIndex, regWrites);
 		}
-
-		if (AR_SREV_9561(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
-			REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
-					modesIndex, regWrites);
 	}
 
 	if (AR_SREV_9550(ah) || AR_SREV_9561(ah))
 		REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
 				regWrites);
 
+	if (AR_SREV_9561(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
+		REG_WRITE_ARRAY(&ah->ini_modes_rxgain_xlna,
+				modesIndex, regWrites);
 	/*
 	 * TXGAIN initvals.
 	 */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 172a9ff4a..a680a97 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1659,7 +1659,7 @@
 				  struct ieee80211_vif *vif,
 				  enum ieee80211_ampdu_mlme_action action,
 				  struct ieee80211_sta *sta,
-				  u16 tid, u16 *ssn, u8 buf_size)
+				  u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
 {
 	struct ath9k_htc_priv *priv = hw->priv;
 	struct ath9k_htc_sta *ista;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index e8454db..4f0a3f6 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -919,7 +919,7 @@
 	struct ar5416IniArray iniCckfirJapan2484;
 	struct ar5416IniArray iniModes_9271_ANI_reg;
 	struct ar5416IniArray ini_radio_post_sys2ant;
-	struct ar5416IniArray ini_modes_rxgain_5g_xlna;
+	struct ar5416IniArray ini_modes_rxgain_xlna;
 	struct ar5416IniArray ini_modes_rxgain_bb_core;
 	struct ar5416IniArray ini_modes_rxgain_bb_postamble;
 
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 56abb9d..d184e68 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1862,7 +1862,7 @@
 			      struct ieee80211_vif *vif,
 			      enum ieee80211_ampdu_mlme_action action,
 			      struct ieee80211_sta *sta,
-			      u16 tid, u16 *ssn, u8 buf_size)
+			      u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
 {
 	struct ath_softc *sc = hw->priv;
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 170c209..19d3d64 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1415,7 +1415,7 @@
 				    struct ieee80211_vif *vif,
 				    enum ieee80211_ampdu_mlme_action action,
 				    struct ieee80211_sta *sta,
-				    u16 tid, u16 *ssn, u8 buf_size)
+				    u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
 {
 	struct ar9170 *ar = hw->priv;
 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 900e72a..7c169ab 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -859,7 +859,7 @@
 		    struct ieee80211_vif *vif,
 		    enum ieee80211_ampdu_mlme_action action,
 		    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-		    u8 buf_size)
+		    u8 buf_size, bool amsdu)
 {
 	struct wcn36xx *wcn = hw->priv;
 	struct wcn36xx_sta *sta_priv = NULL;
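Every ampdu_action implementation touched by this series (ath10k, ath9k, ath9k_htc, carl9170, wcn36xx) gains a trailing bool amsdu parameter; as far as these hunks show, it tells the driver whether A-MSDU may be used within the aggregation session. A minimal kernel-context sketch of the updated callback shape; the body is a placeholder, not a working implementation:

/* Driver-side shape of the updated ampdu_action callback, as reflected by the
 * hunks above.
 */
static int example_ampdu_action(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				enum ieee80211_ampdu_mlme_action action,
				struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				u8 buf_size, bool amsdu)
{
	/* A driver that ignores the A-MSDU hint can simply not read 'amsdu'. */
	return -EOPNOTSUPP;	/* decline actions the hardware cannot do */
}
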
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index fe3dc12..ab42b1f 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -82,5 +82,6 @@
 config BRCMDBG
 	bool "Broadcom driver debug functions"
 	depends on BRCMSMAC || BRCMFMAC
+	select WANT_DEV_COREDUMP
 	---help---
 	  Selecting this enables additional code for debug purposes.
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
index 89e6a4d..230cad7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
@@ -65,6 +65,8 @@
  * @rxctl: receive a control response message from dongle.
  * @gettxq: obtain a reference of bus transmit queue (optional).
  * @wowl_config: specify if dongle is configured for wowl when going to suspend
+ * @get_ramsize: obtain size of device memory.
+ * @get_memdump: obtain device memory dump in provided buffer.
  *
  * This structure provides an abstract interface towards the
  * bus specific driver. For control messages to common driver
@@ -79,6 +81,8 @@
 	int (*rxctl)(struct device *dev, unsigned char *msg, uint len);
 	struct pktq * (*gettxq)(struct device *dev);
 	void (*wowl_config)(struct device *dev, bool enabled);
+	size_t (*get_ramsize)(struct device *dev);
+	int (*get_memdump)(struct device *dev, void *data, size_t len);
 };
 
 
@@ -185,6 +189,23 @@
 		bus->ops->wowl_config(bus->dev, enabled);
 }
 
+static inline size_t brcmf_bus_get_ramsize(struct brcmf_bus *bus)
+{
+	if (!bus->ops->get_ramsize)
+		return 0;
+
+	return bus->ops->get_ramsize(bus->dev);
+}
+
+static inline
+int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len)
+{
+	if (!bus->ops->get_memdump)
+		return -EOPNOTSUPP;
+
+	return bus->ops->get_memdump(bus->dev, data, len);
+}
+
 /*
  * interface functions from common layer
  */
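The new get_ramsize/get_memdump bus ops give the common layer a way to pull the dongle RAM for the devcoredump support added later in this series. A kernel-context sketch of a caller, with a made-up function name and trimmed error handling:

/* Hypothetical caller, kernel context only; uses the inline wrappers above. */
static void *example_grab_ramdump(struct brcmf_bus *bus, size_t *lenp)
{
	size_t ramsize = brcmf_bus_get_ramsize(bus);
	void *dump;

	if (!ramsize)
		return NULL;		/* bus cannot report its RAM size */

	dump = vzalloc(ramsize);
	if (!dump)
		return NULL;

	if (brcmf_bus_get_memdump(bus, dump, ramsize) < 0) {
		vfree(dump);
		return NULL;
	}

	*lenp = ramsize;
	return dump;
}
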
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index 891f4ed..deb5f78 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -840,7 +840,6 @@
 			err = brcmf_p2p_ifchange(cfg, BRCMF_FIL_P2P_IF_GO);
 		}
 		if (!err) {
-			set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
 			brcmf_dbg(INFO, "IF Type = AP\n");
 		}
 	} else {
@@ -2432,6 +2431,9 @@
 	struct brcmf_sta_info_le sta_info_le;
 	u32 sta_flags;
 	u32 is_tdls_peer;
+	s32 total_rssi;
+	s32 count_rssi;
+	u32 i;
 
 	brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
 	if (!check_vif_up(ifp->vif))
@@ -2478,13 +2480,13 @@
 		sinfo->rx_packets += le32_to_cpu(sta_info_le.rx_mcast_pkts);
 		if (sinfo->tx_packets) {
 			sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
-			sinfo->txrate.legacy = le32_to_cpu(sta_info_le.tx_rate);
-			sinfo->txrate.legacy /= 100;
+			sinfo->txrate.legacy =
+				le32_to_cpu(sta_info_le.tx_rate) / 100;
 		}
 		if (sinfo->rx_packets) {
 			sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
-			sinfo->rxrate.legacy = le32_to_cpu(sta_info_le.rx_rate);
-			sinfo->rxrate.legacy /= 100;
+			sinfo->rxrate.legacy =
+				le32_to_cpu(sta_info_le.rx_rate) / 100;
 		}
 		if (le16_to_cpu(sta_info_le.ver) >= 4) {
 			sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
@@ -2492,12 +2494,61 @@
 			sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
 			sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
 		}
+		total_rssi = 0;
+		count_rssi = 0;
+		for (i = 0; i < BRCMF_ANT_MAX; i++) {
+			if (sta_info_le.rssi[i]) {
+				sinfo->chain_signal_avg[count_rssi] =
+					sta_info_le.rssi[i];
+				sinfo->chain_signal[count_rssi] =
+					sta_info_le.rssi[i];
+				total_rssi += sta_info_le.rssi[i];
+				count_rssi++;
+			}
+		}
+		if (count_rssi) {
+			sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL);
+			sinfo->chains = count_rssi;
+
+			sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+			total_rssi /= count_rssi;
+			sinfo->signal = total_rssi;
+		}
 	}
 done:
 	brcmf_dbg(TRACE, "Exit\n");
 	return err;
 }
 
+static int
+brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev,
+			    int idx, u8 *mac, struct station_info *sinfo)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	s32 err;
+
+	brcmf_dbg(TRACE, "Enter, idx %d\n", idx);
+
+	if (idx == 0) {
+		cfg->assoclist.count = cpu_to_le32(BRCMF_MAX_ASSOCLIST);
+		err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_ASSOCLIST,
+					     &cfg->assoclist,
+					     sizeof(cfg->assoclist));
+		if (err) {
+			brcmf_err("BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n",
+				  err);
+			cfg->assoclist.count = 0;
+			return -EOPNOTSUPP;
+		}
+	}
+	if (idx < le32_to_cpu(cfg->assoclist.count)) {
+		memcpy(mac, cfg->assoclist.mac[idx], ETH_ALEN);
+		return brcmf_cfg80211_get_station(wiphy, ndev, mac, sinfo);
+	}
+	return -ENOENT;
+}
+
 static s32
 brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
 			   bool enabled, s32 timeout)
@@ -4199,8 +4250,8 @@
 
 		brcmf_dbg(TRACE, "GO mode configuration complete\n");
 	}
-	clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
 	set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+	brcmf_net_setcarrier(ifp, true);
 
 exit:
 	if ((err) && (!mbss)) {
@@ -4264,8 +4315,8 @@
 	}
 	brcmf_set_mpc(ifp, 1);
 	brcmf_configure_arp_offload(ifp, true);
-	set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
 	clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+	brcmf_net_setcarrier(ifp, false);
 
 	return err;
 }
@@ -4597,6 +4648,7 @@
 	.join_ibss = brcmf_cfg80211_join_ibss,
 	.leave_ibss = brcmf_cfg80211_leave_ibss,
 	.get_station = brcmf_cfg80211_get_station,
+	.dump_station = brcmf_cfg80211_dump_station,
 	.set_tx_power = brcmf_cfg80211_set_tx_power,
 	.get_tx_power = brcmf_cfg80211_get_tx_power,
 	.add_key = brcmf_cfg80211_add_key,
@@ -4974,6 +5026,7 @@
 				&ifp->vif->sme_state);
 		} else
 			brcmf_bss_connect_done(cfg, ndev, e, true);
+		brcmf_net_setcarrier(ifp, true);
 	} else if (brcmf_is_linkdown(e)) {
 		brcmf_dbg(CONN, "Linkdown\n");
 		if (!brcmf_is_ibssmode(ifp->vif)) {
@@ -4983,6 +5036,7 @@
 		brcmf_init_prof(ndev_to_prof(ndev));
 		if (ndev != cfg_to_ndev(cfg))
 			complete(&cfg->vif_disabled);
+		brcmf_net_setcarrier(ifp, false);
 	} else if (brcmf_is_nonetwork(cfg, e)) {
 		if (brcmf_is_ibssmode(ifp->vif))
 			clear_bit(BRCMF_VIF_STATUS_CONNECTING,
@@ -6238,6 +6292,17 @@
 		else
 			*cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
 	}
+	/* p2p might require that "if-events" get processed by fweh. So
+	 * activate the already registered event handlers now and activate
+	 * the rest when initialization has completed. drvr->config needs to
+	 * be assigned before activating events.
+	 */
+	drvr->config = cfg;
+	err = brcmf_fweh_activate_events(ifp);
+	if (err) {
+		brcmf_err("FWEH activation failed (%d)\n", err);
+		goto wiphy_unreg_out;
+	}
 
 	err = brcmf_p2p_attach(cfg, p2pdev_forced);
 	if (err) {
@@ -6260,6 +6325,13 @@
 				    brcmf_notify_tdls_peer_event);
 	}
 
+	/* (re-) activate FWEH event handling */
+	err = brcmf_fweh_activate_events(ifp);
+	if (err) {
+		brcmf_err("FWEH activation failed (%d)\n", err);
+		goto wiphy_unreg_out;
+	}
+
 	return cfg;
 
 wiphy_unreg_out:
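brcmf_cfg80211_dump_station() above follows the usual cfg80211 dump contract: it is called with an increasing index, refreshes the firmware association list on index 0, and returns -ENOENT once the list is exhausted. A rough kernel-context sketch of that iteration; the walker function is invented for illustration:

/* Hypothetical walker, kernel context only. */
static void example_walk_stations(struct wiphy *wiphy, struct net_device *ndev)
{
	struct station_info sinfo;
	u8 mac[ETH_ALEN];
	int idx;

	for (idx = 0; ; idx++) {
		memset(&sinfo, 0, sizeof(sinfo));
		if (brcmf_cfg80211_dump_station(wiphy, ndev, idx, mac, &sinfo))
			break;		/* -ENOENT ends the walk */
		/* mac and sinfo now describe the idx-th associated station */
	}
}
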
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
index 3f5e550..6a878c8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
@@ -143,7 +143,6 @@
  * @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress.
  * @BRCMF_VIF_STATUS_CONNECTED: connected/joined succesfully.
  * @BRCMF_VIF_STATUS_DISCONNECTING: disconnect/disable in progress.
- * @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation.
  * @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
  */
 enum brcmf_vif_status {
@@ -151,7 +150,6 @@
 	BRCMF_VIF_STATUS_CONNECTING,
 	BRCMF_VIF_STATUS_CONNECTED,
 	BRCMF_VIF_STATUS_DISCONNECTING,
-	BRCMF_VIF_STATUS_AP_CREATING,
 	BRCMF_VIF_STATUS_AP_CREATED
 };
 
@@ -407,6 +405,7 @@
 	struct brcmu_d11inf d11inf;
 	bool wowl_enabled;
 	u32 pre_wowl_pmmode;
+	struct brcmf_assoclist_le assoclist;
 };
 
 /**
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index ffc3ace..f04833d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -682,6 +682,7 @@
 	case BRCM_CC_43570_CHIP_ID:
 	case BRCM_CC_4358_CHIP_ID:
 	case BRCM_CC_43602_CHIP_ID:
+	case BRCM_CC_4371_CHIP_ID:
 		return 0x180000;
 	case BRCM_CC_4365_CHIP_ID:
 	case BRCM_CC_4366_CHIP_ID:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/common.h b/drivers/net/wireless/brcm80211/brcmfmac/common.h
index 0d39d80..21c7488 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/common.h
@@ -17,4 +17,7 @@
 
 extern const u8 ALLFFMAC[ETH_ALEN];
 
+/* Sets dongle media info (drv_version, mac address). */
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+
 #endif /* BRCMFMAC_COMMON_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c
index 8c2a280..b5ab98e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c
@@ -33,6 +33,7 @@
 #include "feature.h"
 #include "proto.h"
 #include "pcie.h"
+#include "common.h"
 
 MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
@@ -634,8 +635,7 @@
 
 	brcmf_cfg80211_down(ndev);
 
-	/* Set state and stop OS transmissions */
-	netif_stop_queue(ndev);
+	brcmf_net_setcarrier(ifp, false);
 
 	return 0;
 }
@@ -669,8 +669,8 @@
 		return -EIO;
 	}
 
-	/* Allow transmit calls */
-	netif_start_queue(ndev);
+	/* Clear carrier; it is set when connected or in AP mode. */
+	netif_carrier_off(ndev);
 	return 0;
 }
 
@@ -735,6 +735,24 @@
 		brcmf_cfg80211_free_netdev(ndev);
 }
 
+void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on)
+{
+	struct net_device *ndev;
+
+	brcmf_dbg(TRACE, "Enter, idx=%d carrier=%d\n", ifp->bssidx, on);
+
+	ndev = ifp->ndev;
+	brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_DISCONNECTED, !on);
+	if (on) {
+		if (!netif_carrier_ok(ndev))
+			netif_carrier_on(ndev);
+
+	} else {
+		if (netif_carrier_ok(ndev))
+			netif_carrier_off(ndev);
+	}
+}
+
 static int brcmf_net_p2p_open(struct net_device *ndev)
 {
 	brcmf_dbg(TRACE, "Enter\n");
@@ -828,8 +846,8 @@
 	} else {
 		brcmf_dbg(INFO, "allocate netdev interface\n");
 		/* Allocate netdev, including space for private structure */
-		ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN,
-				    ether_setup);
+		ndev = alloc_netdev(sizeof(*ifp), is_p2pdev ? "p2p%d" : name,
+				    NET_NAME_UNKNOWN, ether_setup);
 		if (!ndev)
 			return ERR_PTR(-ENOMEM);
 
@@ -957,8 +975,8 @@
 	drvr->bus_if = dev_get_drvdata(dev);
 	drvr->bus_if->drvr = drvr;
 
-	/* create device debugfs folder */
-	brcmf_debugfs_attach(drvr);
+	/* attach debug facilities */
+	brcmf_debug_attach(drvr);
 
 	/* Attach and link in the protocol */
 	ret = brcmf_proto_attach(drvr);
@@ -1021,12 +1039,7 @@
 	if (IS_ERR(ifp))
 		return PTR_ERR(ifp);
 
-	if (brcmf_p2p_enable)
-		p2p_ifp = brcmf_add_if(drvr, 1, 0, false, "p2p%d", NULL);
-	else
-		p2p_ifp = NULL;
-	if (IS_ERR(p2p_ifp))
-		p2p_ifp = NULL;
+	p2p_ifp = NULL;
 
 	/* signal bus ready */
 	brcmf_bus_change_state(bus_if, BRCMF_BUS_UP);
@@ -1060,11 +1073,13 @@
 		goto fail;
 	}
 
-	ret = brcmf_fweh_activate_events(ifp);
-	if (ret < 0)
-		goto fail;
-
 	ret = brcmf_net_attach(ifp, false);
+
+	if ((!ret) && (brcmf_p2p_enable)) {
+		p2p_ifp = drvr->iflist[1];
+		if (p2p_ifp)
+			ret = brcmf_net_p2p_attach(p2p_ifp);
+	}
 fail:
 	if (ret < 0) {
 		brcmf_err("failed: %d\n", ret);
@@ -1076,20 +1091,12 @@
 			brcmf_fws_del_interface(ifp);
 			brcmf_fws_deinit(drvr);
 		}
-		if (drvr->iflist[0]) {
+		if (ifp)
 			brcmf_net_detach(ifp->ndev);
-			drvr->iflist[0] = NULL;
-		}
-		if (p2p_ifp) {
+		if (p2p_ifp)
 			brcmf_net_detach(p2p_ifp->ndev);
-			drvr->iflist[1] = NULL;
-		}
 		return ret;
 	}
-	if ((brcmf_p2p_enable) && (p2p_ifp))
-		if (brcmf_net_p2p_attach(p2p_ifp) < 0)
-			brcmf_p2p_enable = 0;
-
 	return 0;
 }
 
@@ -1155,7 +1162,7 @@
 
 	brcmf_proto_detach(drvr);
 
-	brcmf_debugfs_detach(drvr);
+	brcmf_debug_detach(drvr);
 	bus_if->drvr = NULL;
 	kfree(drvr);
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h
index d81ff95..2f9101b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h
@@ -154,10 +154,13 @@
  *	netif stopped due to firmware signalling flow control.
  * @BRCMF_NETIF_STOP_REASON_FLOW:
  *	netif stopped due to flowring full.
+ * @BRCMF_NETIF_STOP_REASON_DISCONNECTED:
+ *	netif stopped due to not being connected (STA mode).
  */
 enum brcmf_netif_stop_reason {
-	BRCMF_NETIF_STOP_REASON_FWS_FC = 1,
-	BRCMF_NETIF_STOP_REASON_FLOW = 2
+	BRCMF_NETIF_STOP_REASON_FWS_FC = BIT(0),
+	BRCMF_NETIF_STOP_REASON_FLOW = BIT(1),
+	BRCMF_NETIF_STOP_REASON_DISCONNECTED = BIT(2)
 };
 
 /**
@@ -213,8 +216,6 @@
 			  enum brcmf_netif_stop_reason reason, bool state);
 void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
 void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
-
-/* Sets dongle media info (drv_version, mac address). */
-int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
 
 #endif /* BRCMFMAC_CORE_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
index 2d6d005..1299dcc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/debug.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
@@ -16,15 +16,45 @@
 #include <linux/debugfs.h>
 #include <linux/netdevice.h>
 #include <linux/module.h>
+#include <linux/devcoredump.h>
 
 #include <brcmu_wifi.h>
 #include <brcmu_utils.h>
 #include "core.h"
 #include "bus.h"
+#include "fweh.h"
 #include "debug.h"
 
 static struct dentry *root_folder;
 
+static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
+				      size_t len)
+{
+	void *dump;
+	size_t ramsize;
+
+	ramsize = brcmf_bus_get_ramsize(bus);
+	if (ramsize) {
+		dump = vzalloc(len + ramsize);
+		if (!dump)
+			return -ENOMEM;
+		memcpy(dump, data, len);
+		brcmf_bus_get_memdump(bus, dump + len, ramsize);
+		dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
+	}
+	return 0;
+}
+
+static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp,
+					   const struct brcmf_event_msg *evtmsg,
+					   void *data)
+{
+	brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
+
+	return brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
+					  evtmsg->datalen);
+}
+
 void brcmf_debugfs_init(void)
 {
 	root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
@@ -41,7 +71,7 @@
 	root_folder = NULL;
 }
 
-int brcmf_debugfs_attach(struct brcmf_pub *drvr)
+int brcmf_debug_attach(struct brcmf_pub *drvr)
 {
 	struct device *dev = drvr->bus_if->dev;
 
@@ -49,12 +79,18 @@
 		return -ENODEV;
 
 	drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
+	if (IS_ERR(drvr->dbgfs_dir))
+		return PTR_ERR(drvr->dbgfs_dir);
 
-	return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
+
+	return brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
+				   brcmf_debug_psm_watchdog_notify);
 }
 
-void brcmf_debugfs_detach(struct brcmf_pub *drvr)
+void brcmf_debug_detach(struct brcmf_pub *drvr)
 {
+	brcmf_fweh_unregister(drvr, BRCMF_E_PSM_WATCHDOG);
+
 	if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
 		debugfs_remove_recursive(drvr->dbgfs_dir);
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/brcm80211/brcmfmac/debug.h
index 48648ca..d0d9676 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.h
@@ -109,8 +109,8 @@
 #ifdef DEBUG
 void brcmf_debugfs_init(void);
 void brcmf_debugfs_exit(void);
-int brcmf_debugfs_attach(struct brcmf_pub *drvr);
-void brcmf_debugfs_detach(struct brcmf_pub *drvr);
+int brcmf_debug_attach(struct brcmf_pub *drvr);
+void brcmf_debug_detach(struct brcmf_pub *drvr);
 struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
 int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
 			    int (*read_fn)(struct seq_file *seq, void *data));
@@ -121,11 +121,11 @@
 static inline void brcmf_debugfs_exit(void)
 {
 }
-static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr)
+static inline int brcmf_debug_attach(struct brcmf_pub *drvr)
 {
 	return 0;
 }
-static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
+static inline void brcmf_debug_detach(struct brcmf_pub *drvr)
 {
 }
 static inline
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 971920f..4248f3c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -29,7 +29,7 @@
 #define BRCMF_FW_NVRAM_PCIEDEV_LEN		10	/* pcie/1/4/ + \0 */
 
 char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
-module_param_string(firmware_path, brcmf_firmware_path,
+module_param_string(alternative_fw_path, brcmf_firmware_path,
 		    BRCMF_FW_PATH_LEN, 0440);
 
 enum nvram_parser_state {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index 383d6fa..3878b6f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -213,7 +213,8 @@
 				   is_p2pdev, emsg->ifname, emsg->addr);
 		if (IS_ERR(ifp))
 			return;
-		brcmf_fws_add_interface(ifp);
+		if (!is_p2pdev)
+			brcmf_fws_add_interface(ifp);
 		if (!drvr->fweh.evt_handler[BRCMF_E_IF])
 			if (brcmf_net_attach(ifp, false) < 0)
 				return;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
index 5434dcf..b20fc0f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -72,6 +72,7 @@
 #define BRCMF_C_GET_BSS_INFO			136
 #define BRCMF_C_GET_BANDLIST			140
 #define BRCMF_C_SET_SCB_TIMEOUT			158
+#define BRCMF_C_GET_ASSOCLIST			159
 #define BRCMF_C_GET_PHYLIST			180
 #define BRCMF_C_SET_SCAN_CHANNEL_TIME		185
 #define BRCMF_C_SET_SCAN_UNASSOC_TIME		187
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 297911f..daa427b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -119,6 +119,8 @@
 #define BRCMF_COUNTRY_BUF_SZ		4
 #define BRCMF_ANT_MAX			4
 
+#define BRCMF_MAX_ASSOCLIST		128
+
 /* join preference types for join_pref iovar */
 enum brcmf_join_pref_types {
 	BRCMF_JOIN_PREF_RSSI = 1,
@@ -621,4 +623,15 @@
 	__le32 nvramrev;
 };
 
+/**
+ * struct brcmf_assoclist_le - request assoc list.
+ *
+ * @count: indicates number of stations.
+ * @mac: MAC addresses of stations.
+ */
+struct brcmf_assoclist_le {
+	__le32 count;
+	u8 mac[BRCMF_MAX_ASSOCLIST][ETH_ALEN];
+};
+
 #endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 7eff9de..44e618f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -873,9 +873,6 @@
 	commonring = msgbuf->flowrings[flowid];
 	atomic_dec(&commonring->outstanding_tx);
 
-	/* Hante: i believe this was a bug as tx_status->msg.ifidx was used
-	 * in brcmf_txfinalize as index in drvr->iflist. Can you confirm/deny?
-	 */
 	brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
 			 skb, true);
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index 37a8c352..d224b3d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -2353,83 +2353,30 @@
  * brcmf_p2p_attach() - attach for P2P.
  *
  * @cfg: driver private data for cfg80211 interface.
+ * @p2pdev_forced: create p2p device interface at attach.
  */
 s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced)
 {
-	struct brcmf_if *pri_ifp;
-	struct brcmf_if *p2p_ifp;
-	struct brcmf_cfg80211_vif *p2p_vif;
 	struct brcmf_p2p_info *p2p;
-	struct brcmf_pub *drvr;
-	s32 bssidx;
+	struct brcmf_if *pri_ifp;
 	s32 err = 0;
+	void *err_ptr;
 
 	p2p = &cfg->p2p;
 	p2p->cfg = cfg;
 
-	drvr = cfg->pub;
-
-	pri_ifp = brcmf_get_ifp(drvr, 0);
+	pri_ifp = brcmf_get_ifp(cfg->pub, 0);
 	p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
 
 	if (p2pdev_forced) {
-		p2p_ifp = drvr->iflist[1];
+		err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);
+		if (IS_ERR(err_ptr)) {
+			brcmf_err("P2P device creation failed.\n");
+			err = PTR_ERR(err_ptr);
+		}
 	} else {
-		p2p_ifp = NULL;
 		p2p->p2pdev_dynamically = true;
 	}
-	if (p2p_ifp) {
-		p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE,
-					  false);
-		if (IS_ERR(p2p_vif)) {
-			brcmf_err("could not create discovery vif\n");
-			err = -ENOMEM;
-			goto exit;
-		}
-
-		p2p_vif->ifp = p2p_ifp;
-		p2p_ifp->vif = p2p_vif;
-		p2p_vif->wdev.netdev = p2p_ifp->ndev;
-		p2p_ifp->ndev->ieee80211_ptr = &p2p_vif->wdev;
-		SET_NETDEV_DEV(p2p_ifp->ndev, wiphy_dev(cfg->wiphy));
-
-		p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif;
-
-		brcmf_p2p_generate_bss_mac(p2p, NULL);
-		memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN);
-		brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
-
-		brcmf_fweh_p2pdev_setup(pri_ifp, true);
-
-		/* Initialize P2P Discovery in the firmware */
-		err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
-		if (err < 0) {
-			brcmf_err("set p2p_disc error\n");
-			brcmf_free_vif(p2p_vif);
-			goto exit;
-		}
-		/* obtain bsscfg index for P2P discovery */
-		err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
-		if (err < 0) {
-			brcmf_err("retrieving discover bsscfg index failed\n");
-			brcmf_free_vif(p2p_vif);
-			goto exit;
-		}
-		/* Verify that firmware uses same bssidx as driver !! */
-		if (p2p_ifp->bssidx != bssidx) {
-			brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
-				  bssidx, p2p_ifp->bssidx);
-			brcmf_free_vif(p2p_vif);
-			goto exit;
-		}
-
-		init_completion(&p2p->send_af_done);
-		INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
-		init_completion(&p2p->afx_hdl.act_frm_scan);
-		init_completion(&p2p->wait_next_af);
-exit:
-		brcmf_fweh_p2pdev_setup(pri_ifp, false);
-	}
 	return err;
 }
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 30baf35..83d8042 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -59,6 +59,8 @@
 #define BRCMF_PCIE_4365_NVRAM_NAME		"brcm/brcmfmac4365b-pcie.txt"
 #define BRCMF_PCIE_4366_FW_NAME			"brcm/brcmfmac4366b-pcie.bin"
 #define BRCMF_PCIE_4366_NVRAM_NAME		"brcm/brcmfmac4366b-pcie.txt"
+#define BRCMF_PCIE_4371_FW_NAME			"brcm/brcmfmac4371-pcie.bin"
+#define BRCMF_PCIE_4371_NVRAM_NAME		"brcm/brcmfmac4371-pcie.txt"
 
 #define BRCMF_PCIE_FW_UP_TIMEOUT		2000 /* msec */
 
@@ -212,6 +214,8 @@
 MODULE_FIRMWARE(BRCMF_PCIE_4365_NVRAM_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_4366_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_4366_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4371_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4371_NVRAM_NAME);
 
 
 struct brcmf_pcie_console {
@@ -448,6 +452,47 @@
 }
 
 
+static void
+brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+			  void *dstaddr, u32 len)
+{
+	void __iomem *address = devinfo->tcm + mem_offset;
+	__le32 *dst32;
+	__le16 *dst16;
+	u8 *dst8;
+
+	if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
+		if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
+			dst8 = (u8 *)dstaddr;
+			while (len) {
+				*dst8 = ioread8(address);
+				address++;
+				dst8++;
+				len--;
+			}
+		} else {
+			len = len / 2;
+			dst16 = (__le16 *)dstaddr;
+			while (len) {
+				*dst16 = cpu_to_le16(ioread16(address));
+				address += 2;
+				dst16++;
+				len--;
+			}
+		}
+	} else {
+		len = len / 4;
+		dst32 = (__le32 *)dstaddr;
+		while (len) {
+			*dst32 = cpu_to_le32(ioread32(address));
+			address += 4;
+			dst32++;
+			len--;
+		}
+	}
+}
+
+
 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
 		CHIPCREGOFFS(reg), value)
 
@@ -1352,12 +1397,36 @@
 }
 
 
+static size_t brcmf_pcie_get_ramsize(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+
+	return devinfo->ci->ramsize - devinfo->ci->srsize;
+}
+
+
+static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+
+	brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
+	brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
+	return 0;
+}
+
+
 static struct brcmf_bus_ops brcmf_pcie_bus_ops = {
 	.txdata = brcmf_pcie_tx,
 	.stop = brcmf_pcie_down,
 	.txctl = brcmf_pcie_tx_ctlpkt,
 	.rxctl = brcmf_pcie_rx_ctlpkt,
 	.wowl_config = brcmf_pcie_wowl_config,
+	.get_ramsize = brcmf_pcie_get_ramsize,
+	.get_memdump = brcmf_pcie_get_memdump,
 };
 
 
@@ -1456,6 +1525,10 @@
 		fw_name = BRCMF_PCIE_4366_FW_NAME;
 		nvram_name = BRCMF_PCIE_4366_NVRAM_NAME;
 		break;
+	case BRCM_CC_4371_CHIP_ID:
+		fw_name = BRCMF_PCIE_4371_FW_NAME;
+		nvram_name = BRCMF_PCIE_4371_NVRAM_NAME;
+		break;
 	default:
 		brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
 		return -ENODEV;
@@ -1995,6 +2068,7 @@
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
 	{ /* end: all zeroes */ }
 };
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index 7f574f2..7e74ac3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -3539,6 +3539,51 @@
 	return err;
 }
 
+static size_t brcmf_sdio_bus_get_ramsize(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+
+	return bus->ci->ramsize - bus->ci->srsize;
+}
+
+static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
+				      size_t mem_size)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+	int err;
+	int address;
+	int offset;
+	int len;
+
+	brcmf_dbg(INFO, "dump at 0x%08x: size=%zu\n", bus->ci->rambase,
+		  mem_size);
+
+	address = bus->ci->rambase;
+	offset = err = 0;
+	sdio_claim_host(sdiodev->func[1]);
+	while (offset < mem_size) {
+		len = ((offset + MEMBLOCK) < mem_size) ? MEMBLOCK :
+		      mem_size - offset;
+		err = brcmf_sdiod_ramrw(sdiodev, false, address, data, len);
+		if (err) {
+			brcmf_err("error %d on reading %d membytes at 0x%08x\n",
+				  err, len, address);
+			goto done;
+		}
+		data += len;
+		offset += len;
+		address += len;
+	}
+
+done:
+	sdio_release_host(sdiodev->func[1]);
+	return err;
+}
+
 void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
 {
 	if (!bus->dpc_triggered) {
@@ -3987,7 +4032,9 @@
 	.txctl = brcmf_sdio_bus_txctl,
 	.rxctl = brcmf_sdio_bus_rxctl,
 	.gettxq = brcmf_sdio_bus_gettxq,
-	.wowl_config = brcmf_sdio_wowl_config
+	.wowl_config = brcmf_sdio_wowl_config,
+	.get_ramsize = brcmf_sdio_bus_get_ramsize,
+	.get_memdump = brcmf_sdio_bus_get_memdump,
 };
 
 static void brcmf_sdio_firmware_callback(struct device *dev,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index daba86d..689e64d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -144,6 +144,7 @@
 
 	struct usb_device *usbdev;
 	struct device *dev;
+	struct mutex dev_init_lock;
 
 	int ctl_in_pipe, ctl_out_pipe;
 	struct urb *ctl_urb; /* URB for control endpoint */
@@ -1204,6 +1205,8 @@
 	int ret;
 
 	brcmf_dbg(USB, "Start fw downloading\n");
+
+	devinfo = bus->bus_priv.usb->devinfo;
 	ret = check_file(fw->data);
 	if (ret < 0) {
 		brcmf_err("invalid firmware\n");
@@ -1211,7 +1214,6 @@
 		goto error;
 	}
 
-	devinfo = bus->bus_priv.usb->devinfo;
 	devinfo->image = fw->data;
 	devinfo->image_len = fw->size;
 
@@ -1224,9 +1226,11 @@
 	if (ret)
 		goto error;
 
+	mutex_unlock(&devinfo->dev_init_lock);
 	return;
 error:
 	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
+	mutex_unlock(&devinfo->dev_init_lock);
 	device_release_driver(dev);
 }
 
@@ -1264,6 +1268,7 @@
 		if (ret)
 			goto fail;
 		/* we are done */
+		mutex_unlock(&devinfo->dev_init_lock);
 		return 0;
 	}
 	bus->chip = bus_pub->devid;
@@ -1317,6 +1322,12 @@
 
 	devinfo->usbdev = usb;
 	devinfo->dev = &usb->dev;
+	/* Take an init lock to protect against disconnect while still
+	 * loading; needed because firmware loading is asynchronous.
+	 */
+	mutex_init(&devinfo->dev_init_lock);
+	mutex_lock(&devinfo->dev_init_lock);
+
 	usb_set_intfdata(intf, devinfo);
 
 	/* Check that the device supports only one configuration */
@@ -1391,6 +1402,7 @@
 	return 0;
 
 fail:
+	mutex_unlock(&devinfo->dev_init_lock);
 	kfree(devinfo);
 	usb_set_intfdata(intf, NULL);
 	return ret;
@@ -1403,8 +1415,19 @@
 
 	brcmf_dbg(USB, "Enter\n");
 	devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
-	brcmf_usb_disconnect_cb(devinfo);
-	kfree(devinfo);
+
+	if (devinfo) {
+		mutex_lock(&devinfo->dev_init_lock);
+		/* Make sure that devinfo still exists. Firmware probe routines
+		 * may have released the device and cleared the intfdata.
+		 */
+		if (!usb_get_intfdata(intf))
+			goto done;
+
+		brcmf_usb_disconnect_cb(devinfo);
+		kfree(devinfo);
+	}
+done:
 	brcmf_dbg(USB, "Exit\n");
 }
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index d2c5747..bec2dc1 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -820,7 +820,7 @@
 		    struct ieee80211_vif *vif,
 		    enum ieee80211_ampdu_mlme_action action,
 		    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-		    u8 buf_size)
+		    u8 buf_size, bool amsdu)
 {
 	struct brcms_info *wl = hw->priv;
 	struct scb *scb = &wl->wlc->pri_scb;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 9728be0..218cbc8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -4585,7 +4585,7 @@
 		wlc_hw->machwcap_backup = wlc_hw->machwcap;
 
 		/* init tx fifo size */
-		WARN_ON((wlc_hw->corerev - XMTFIFOTBL_STARTREV) < 0 ||
+		WARN_ON(wlc_hw->corerev < XMTFIFOTBL_STARTREV ||
 			(wlc_hw->corerev - XMTFIFOTBL_STARTREV) >
 				ARRAY_SIZE(xmtfifo_sz));
 		wlc_hw->xmtfifo_sz =
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index d823734..aa06ea2 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -50,6 +50,7 @@
 #define BRCM_CC_43602_CHIP_ID		43602
 #define BRCM_CC_4365_CHIP_ID		0x4365
 #define BRCM_CC_4366_CHIP_ID		0x4366
+#define BRCM_CC_4371_CHIP_ID		0x4371
 
 /* USB Device IDs */
 #define BRCM_USB_43143_DEVICE_ID	0xbd1e
@@ -75,6 +76,7 @@
 #define BRCM_PCIE_4366_DEVICE_ID	0x43c3
 #define BRCM_PCIE_4366_2G_DEVICE_ID	0x43c4
 #define BRCM_PCIE_4366_5G_DEVICE_ID	0x43c5
+#define BRCM_PCIE_4371_DEVICE_ID	0x440d
 
 
 /* brcmsmac IDs */
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index b86500b..95a7fdb 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -2137,7 +2137,7 @@
 			struct ieee80211_vif *vif,
 			enum ieee80211_ampdu_mlme_action action,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			u8 buf_size)
+			u8 buf_size, bool amsdu)
 {
 	/* Aggregation is implemented fully in firmware,
 	 * including block ack negotiation. Do not allow
diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h
index b7e386b..bebb337 100644
--- a/drivers/net/wireless/cw1200/sta.h
+++ b/drivers/net/wireless/cw1200/sta.h
@@ -111,7 +111,7 @@
 			struct ieee80211_vif *vif,
 			enum ieee80211_ampdu_mlme_action action,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			u8 buf_size);
+			u8 buf_size, bool amsdu);
 
 void cw1200_suspend_resume(struct cw1200_common *priv,
 			  struct wsm_suspend_resume *arg);
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index a6877dd..cef7f7d 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -1091,8 +1091,6 @@
 		MFIE_STRING(TIM);
 		MFIE_STRING(IBSS_PARAMS);
 		MFIE_STRING(COUNTRY);
-		MFIE_STRING(HP_PARAMS);
-		MFIE_STRING(HP_TABLE);
 		MFIE_STRING(REQUEST);
 		MFIE_STRING(CHALLENGE);
 		MFIE_STRING(PWR_CONSTRAINT);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 44fa422..6656215 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5984,7 +5984,7 @@
 il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			enum ieee80211_ampdu_mlme_action action,
 			struct ieee80211_sta *sta, u16 tid, u16 * ssn,
-			u8 buf_size)
+			u8 buf_size, bool amsdu)
 {
 	struct il_priv *il = hw->priv;
 	int ret = -EINVAL;
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 3a57f71..8ab8706 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -184,7 +184,7 @@
 int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			    enum ieee80211_ampdu_mlme_action action,
 			    struct ieee80211_sta *sta, u16 tid, u16 * ssn,
-			    u8 buf_size);
+			    u8 buf_size, bool amsdu);
 int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta);
 void
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index aba0957..6e949df 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -142,6 +142,7 @@
 config IWLWIFI_DEVICE_TRACING
 	bool "iwlwifi device access tracing"
 	depends on EVENT_TRACING
+	default y
 	help
 	  Say Y here to trace all commands, including TX frames and IO
 	  accesses, sent to the device. If you say yes, iwlwifi will
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 453f7c3..b3ad34e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -731,7 +731,7 @@
 				   struct ieee80211_vif *vif,
 				   enum ieee80211_ampdu_mlme_action action,
 				   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-				   u8 buf_size)
+				   u8 buf_size, bool amsdu)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 	int ret = -EINVAL;
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 5b25f36..d561181 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -73,11 +73,9 @@
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK	13
-#define IWL3165_UCODE_API_OK	13
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN	13
-#define IWL3165_UCODE_API_MIN	13
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION		0x0a1d
@@ -269,11 +267,6 @@
 	.name = "Intel(R) Dual Band Wireless AC 3165",
 	.fw_name_pre = IWL7265D_FW_PRE,
 	IWL_DEVICE_7000,
-	/* sparse doens't like the re-assignment but it is safe */
-#ifndef __CHECKER__
-	.ucode_api_ok = IWL3165_UCODE_API_OK,
-	.ucode_api_min = IWL3165_UCODE_API_MIN,
-#endif
 	.ht_params = &iwl7000_ht_params,
 	.nvm_ver = IWL3165_NVM_VERSION,
 	.nvm_calib_ver = IWL3165_TX_POWER_VERSION,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 939fa22..9109708 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -223,13 +223,13 @@
  * @support_tx_backoff: Support tx-backoff?
  */
 struct iwl_tt_params {
-	s32 ct_kill_entry;
-	s32 ct_kill_exit;
+	u32 ct_kill_entry;
+	u32 ct_kill_exit;
 	u32 ct_kill_duration;
-	s32 dynamic_smps_entry;
-	s32 dynamic_smps_exit;
-	s32 tx_protection_entry;
-	s32 tx_protection_exit;
+	u32 dynamic_smps_entry;
+	u32 dynamic_smps_exit;
+	u32 tx_protection_entry;
+	u32 tx_protection_exit;
 	struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
 	bool support_ct_kill;
 	bool support_dynamic_smps;
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index a86aa5b..463cadf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -450,7 +450,7 @@
 	u32 api_flags = le32_to_cpu(ucode_api->api_flags);
 	int i;
 
-	if (api_index >= IWL_API_MAX_BITS / 32) {
+	if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) {
 		IWL_ERR(drv, "api_index larger than supported by driver\n");
 		/* don't return an error so we can load FW that has more bits */
 		return 0;
@@ -472,7 +472,7 @@
 	u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
 	int i;
 
-	if (api_index >= IWL_CAPABILITIES_MAX_BITS / 32) {
+	if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) {
 		IWL_ERR(drv, "api_index larger than supported by driver\n");
 		/* don't return an error so we can load FW that has more bits */
 		return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 352d245..72ddd4a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -254,6 +254,8 @@
  *	instead of 3.
  * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
  *	(command version 3) that supports per-chain limits
+ *
+ * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
 enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_BT_COEX_SPLIT         = (__force iwl_ucode_tlv_api_t)3,
@@ -264,6 +266,12 @@
 	IWL_UCODE_TLV_API_NEW_VERSION		= (__force iwl_ucode_tlv_api_t)20,
 	IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY	= (__force iwl_ucode_tlv_api_t)24,
 	IWL_UCODE_TLV_API_TX_POWER_CHAIN	= (__force iwl_ucode_tlv_api_t)27,
+
+	NUM_IWL_UCODE_TLV_API
+#ifdef __CHECKER__
+		/* sparse says it cannot increment the previous enum member */
+		= 128
+#endif
 };
 
 typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
@@ -298,6 +306,8 @@
  *	is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ *
+ * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
  */
 enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_D0I3_SUPPORT			= (__force iwl_ucode_tlv_capa_t)0,
@@ -320,6 +330,12 @@
 	IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC		= (__force iwl_ucode_tlv_capa_t)29,
 	IWL_UCODE_TLV_CAPA_BT_COEX_RRC			= (__force iwl_ucode_tlv_capa_t)30,
 	IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT		= (__force iwl_ucode_tlv_capa_t)31,
+
+	NUM_IWL_UCODE_TLV_CAPA
+#ifdef __CHECKER__
+		/* sparse says it cannot increment the previous enum member */
+		= 128
+#endif
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -330,9 +346,6 @@
 /* The default max probe length if not specified by the firmware file */
 #define IWL_DEFAULT_MAX_PROBE_LENGTH	200
 
-#define IWL_API_MAX_BITS		64
-#define IWL_CAPABILITIES_MAX_BITS	64
-
 /*
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 45e7321..84ec0ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -105,8 +105,8 @@
 	u32 n_scan_channels;
 	u32 standard_phy_calibration_size;
 	u32 flags;
-	unsigned long _api[BITS_TO_LONGS(IWL_API_MAX_BITS)];
-	unsigned long _capa[BITS_TO_LONGS(IWL_CAPABILITIES_MAX_BITS)];
+	unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
+	unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
 };
 
 static inline bool
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 3b8e85e..d829849 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -580,13 +580,15 @@
 	IWL_ERR_DEV(dev, "mac address is not found\n");
 }
 
+#define IWL_4165_DEVICE_ID 0x5501
+
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
 		   const __le16 *nvm_hw, const __le16 *nvm_sw,
 		   const __le16 *nvm_calib, const __le16 *regulatory,
 		   const __le16 *mac_override, const __le16 *phy_sku,
 		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
-		   u32 mac_addr0, u32 mac_addr1)
+		   u32 mac_addr0, u32 mac_addr1, u32 hw_id)
 {
 	struct iwl_nvm_data *data;
 	u32 sku;
@@ -625,6 +627,17 @@
 				    (sku & NVM_SKU_CAP_11AC_ENABLE);
 	data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
 
+	/*
+	 * Work around the OTP 0x52 bug:
+	 * force a 1x1 antenna configuration when MIMO is disabled.
+	 */
+	if (hw_id == IWL_4165_DEVICE_ID && data->sku_cap_mimo_disabled) {
+		data->valid_tx_ant = ANT_B;
+		data->valid_rx_ant = ANT_B;
+		tx_chains = ANT_B;
+		rx_chains = ANT_B;
+	}
+
 	data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
 
 	if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index 822ba52..9f44d81 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -79,7 +79,7 @@
 		   const __le16 *nvm_calib, const __le16 *regulatory,
 		   const __le16 *mac_override, const __le16 *phy_sku,
 		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
-		   u32 mac_addr0, u32 mac_addr1);
+		   u32 mac_addr0, u32 mac_addr1, u32 hw_id);
 
 /**
  * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index b47fe9d..2a58d68 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -108,7 +110,8 @@
  * interact with it. The driver layer typically calls the start and stop
  * handlers, the transport layer calls the others.
  *
- * All the handlers MUST be implemented
+ * All the handlers MUST be implemented, except @rx_rss which can be left
+ * out *iff* the opmode will never run on hardware with multi-queue capability.
  *
  * @start: start the op_mode. The transport layer is already allocated.
  *	May sleep
@@ -116,6 +119,10 @@
  *	May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
  *	HCMD this Rx responds to. Can't sleep.
+ * @rx_rss: data queue RX notification to the op_mode, for (data) notifications
+ *	received on the RSS queue(s). The queue parameter indicates which of the
+ *	RSS queues received this frame; it will always be non-zero.
+ *	This method must not sleep.
  * @queue_full: notifies that a HW queue is full.
  *	Must be atomic and called with BH disabled.
  * @queue_not_full: notifies that a HW queue is not full any more.
@@ -146,6 +153,8 @@
 	void (*stop)(struct iwl_op_mode *op_mode);
 	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
 		   struct iwl_rx_cmd_buffer *rxb);
+	void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		       struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
 	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
 	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
 	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -186,6 +195,14 @@
 	return op_mode->ops->rx(op_mode, napi, rxb);
 }
 
+static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
+				      struct napi_struct *napi,
+				      struct iwl_rx_cmd_buffer *rxb,
+				      unsigned int queue)
+{
+	op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
+}
+
 static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
 					  int queue)
 {
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
index 9f8bcef..7161096 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -87,6 +87,7 @@
 	trans->cfg = cfg;
 	trans->ops = ops;
 	trans->dev_cmd_headroom = dev_cmd_headroom;
+	trans->num_rx_queues = 1;
 
 	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
 		 "iwl_cmd_pool:%s", dev_name(trans->dev));
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index c829c50..bb51b6f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -386,6 +386,7 @@
 #define IWL_MAX_HW_QUEUES		32
 #define IWL_MAX_TID_COUNT	8
 #define IWL_FRAME_LIMIT	64
+#define IWL_MAX_RX_HW_QUEUES	16
 
 /**
  * enum iwl_wowlan_status - WoWLAN image/device status
@@ -654,6 +655,8 @@
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
  * @pm_support: set to true in start_hw if link pm is supported
  * @ltr_enabled: set to true if the LTR is enabled
+ * @num_rx_queues: number of RX queues allocated by the transport;
+ *	the transport must set this before calling iwl_drv_start()
  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
  *	The user should use iwl_trans_{alloc,free}_tx_cmd.
  * @dev_cmd_headroom: room needed for the transport's private use before the
@@ -693,6 +696,8 @@
 	bool pm_support;
 	bool ltr_enabled;
 
+	u8 num_rx_queues;
+
 	/* The following fields are internal only */
 	struct kmem_cache *dev_cmd_pool;
 	size_t dev_cmd_headroom;
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index b8ee312..a3ca6db 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -102,6 +102,7 @@
 #define IWL_MVM_QUOTA_THRESHOLD			4
 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE         0
 #define IWL_MVM_RS_DISABLE_P2P_MIMO		0
+#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK	1
 #define IWL_MVM_TOF_IS_RESPONDER		0
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 334ae56..398bef6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -511,7 +511,8 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm *mvm = mvmvif->mvm;
-	int value, ret = -EINVAL;
+	u32 value;
+	int ret = -EINVAL;
 	char *data;
 
 	mutex_lock(&mvm->mutex);
@@ -599,7 +600,8 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm *mvm = mvmvif->mvm;
-	int value, ret = 0;
+	u32 value;
+	int ret = 0;
 	char *data;
 
 	mutex_lock(&mvm->mutex);
@@ -822,7 +824,8 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm *mvm = mvmvif->mvm;
-	int value, ret = 0;
+	u32 value;
+	int ret = 0;
 	char *data;
 
 	mutex_lock(&mvm->mutex);
@@ -892,6 +895,7 @@
 			goto out;
 		}
 		memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
+		goto out;
 	}
 
 	data = iwl_dbgfs_is_match("macaddr_mask=", buf);
@@ -903,21 +907,22 @@
 			goto out;
 		}
 		memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
+		goto out;
 	}
 
 	data = iwl_dbgfs_is_match("ap=", buf);
 	if (data) {
-		struct iwl_tof_range_req_ap_entry ap;
+		struct iwl_tof_range_req_ap_entry ap = {};
 		int size = sizeof(struct iwl_tof_range_req_ap_entry);
 		u16 burst_period;
 		u8 *mac = ap.bssid;
 		unsigned int i;
 
-		if (sscanf(data, "%u %hhd %hhx %hhx"
+		if (sscanf(data, "%u %hhd %hhd %hhd"
 			   "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
-			   "%hhx %hhx %hx"
-			   "%hhx %hhx %x"
-			   "%hhx %hhx %hhx %hhx",
+			   "%hhd %hhd %hd"
+			   "%hhd %hhd %d"
+			   "%hhx %hhd %hhd %hhd",
 			   &i, &ap.channel_num, &ap.bandwidth,
 			   &ap.ctrl_ch_position,
 			   mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
@@ -944,12 +949,12 @@
 	data = iwl_dbgfs_is_match("send_range_request=", buf);
 	if (data) {
 		ret = kstrtou32(data, 10, &value);
-		if (ret == 0 && value) {
+		if (ret == 0 && value)
 			ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
-			goto out;
-		}
+		goto out;
 	}
 
+	ret = -EINVAL;
 out:
 	mutex_unlock(&mvm->mutex);
 	return ret ?: count;
@@ -994,16 +999,18 @@
 		struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
 
 		pos += scnprintf(buf + pos, bufsz - pos,
-				"ap %.2d: channel_num=%hhx bw=%hhx"
-				" control=%hhx bssid=%pM type=%hhx"
-				" num_of_bursts=%hhx burst_period=%hx ftm=%hhx"
-				" retries=%hhx tsf_delta=%x location_req=%hhx "
-				" asap=%hhx enable=%hhx rssi=%hhx\n",
+				"ap %.2d: channel_num=%hhd bw=%hhd"
+				" control=%hhd bssid=%pM type=%hhd"
+				" num_of_bursts=%hhd burst_period=%hd ftm=%hhd"
+				" retries=%hhd tsf_delta=%d"
+				" tsf_delta_direction=%hhd location_req=0x%hhx "
+				" asap=%hhd enable=%hhd rssi=%hhd\n",
 				i, ap->channel_num, ap->bandwidth,
 				ap->ctrl_ch_position, ap->bssid,
 				ap->measure_type, ap->num_of_bursts,
 				ap->burst_period, ap->samples_per_burst,
 				ap->retries_per_sample, ap->tsf_delta,
+				ap->tsf_delta_direction,
 				ap->location_req, ap->asap_mode,
 				ap->enable_dyn_ack, ap->rssi);
 	}
@@ -1019,7 +1026,8 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm *mvm = mvmvif->mvm;
-	int value, ret = 0;
+	u32 value;
+	int ret = 0;
 	char *data;
 
 	mutex_lock(&mvm->mutex);
@@ -1071,12 +1079,12 @@
 	data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
 	if (data) {
 		ret = kstrtou32(data, 10, &value);
-		if (ret == 0 && value) {
+		if (ret == 0 && value)
 			ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
-			goto out;
-		}
+		goto out;
 	}
 
+	ret = -EINVAL;
 out:
 	mutex_unlock(&mvm->mutex);
 	return ret ?: count;
@@ -1099,18 +1107,18 @@
 	mutex_lock(&mvm->mutex);
 
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 "tsf_timer_offset_msec = %hx\n",
+			 "tsf_timer_offset_msec = %hd\n",
 			 cmd->tsf_timer_offset_msec);
-	pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n",
+	pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n",
 			 cmd->min_delta_ftm);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 "ftm_format_and_bw20M = %hhx\n",
+			 "ftm_format_and_bw20M = %hhd\n",
 			 cmd->ftm_format_and_bw20M);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 "ftm_format_and_bw40M = %hhx\n",
+			 "ftm_format_and_bw40M = %hhd\n",
 			 cmd->ftm_format_and_bw40M);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 "ftm_format_and_bw80M = %hhx\n",
+			 "ftm_format_and_bw80M = %hhd\n",
 			 cmd->ftm_format_and_bw80M);
 
 	mutex_unlock(&mvm->mutex);
@@ -1123,8 +1131,8 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm *mvm = mvmvif->mvm;
-	int value, ret = 0;
-	int abort_id;
+	u32 value;
+	int abort_id, ret = 0;
 	char *data;
 
 	mutex_lock(&mvm->mutex);
@@ -1205,11 +1213,11 @@
 		struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
 
 		pos += scnprintf(buf + pos, bufsz - pos,
-				"ap %.2d: bssid=%pM status=%hhx bw=%hhx"
-				" rtt=%x rtt_var=%x rtt_spread=%x"
-				" rssi=%hhx  rssi_spread=%hhx"
-				" range=%x range_var=%x"
-				" time_stamp=%x\n",
+				"ap %.2d: bssid=%pM status=%hhd bw=%hhd"
+				" rtt=%d rtt_var=%d rtt_spread=%d"
+				" rssi=%hhd  rssi_spread=%hhd"
+				" range=%d range_var=%d"
+				" time_stamp=%d\n",
 				i, ap->bssid, ap->measure_status,
 				ap->measure_bw,
 				ap->rtt, ap->rtt_variance, ap->rtt_spread,
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 3b8481f..9b4fbb8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -1495,6 +1495,9 @@
 	if (!debugfs_create_blob("nvm_prod", S_IRUSR,
 				  mvm->debugfs_dir, &mvm->nvm_prod_blob))
 		goto err;
+	if (!debugfs_create_blob("nvm_phy_sku", S_IRUSR,
+				 mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
+		goto err;
 
 	/*
 	 * Create a symlink with mac80211. It will be removed when mac80211
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 7005fa4..c8f3e25 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -192,16 +192,10 @@
 /**
  * enum iwl_device_power_flags - masks for device power command flags
  * @DEVIC_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
- *	receiver and transmitter. '0' - does not allow. This flag should be
- *	always set to '1' unless one need to disable actual power down for debug
- *	purposes.
- * @DEVICE_POWER_FLAGS_CAM_MSK: '1' CAM (Continuous Active Mode) is set, meaning
- *	that power management is disabled. '0' Power management is enabled, one
- *	of power schemes is applied.
+ *	receiver and transmitter. '0' - does not allow.
 */
 enum iwl_device_power_flags {
 	DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK	= BIT(0),
-	DEVICE_POWER_FLAGS_CAM_MSK		= BIT(13),
 };
 
 /**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 5e4014a..44ff684 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -268,6 +268,16 @@
 	REPLY_MAX = 0xff,
 };
 
+enum iwl_phy_ops_subcmd_ids {
+	CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
+	DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
+};
+
+/* command groups */
+enum {
+	PHY_OPS_GROUP = 0x4,
+};
+
 /**
  * struct iwl_cmd_response - generic response struct for most commands
  * @status: status of the command asked, changes for each one
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 248b025..834641e2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -616,12 +616,8 @@
 	 * will be empty.
 	 */
 
-	for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
-		if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE)
-			mvm->queue_to_mac80211[i] = i;
-		else
-			mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
-	}
+	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
+	mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
 
 	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
 		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 5af0090..9d36ba7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -486,16 +486,18 @@
 	switch (vif->type) {
 	case NL80211_IFTYPE_P2P_DEVICE:
 		iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
-				      IWL_MVM_TX_FIFO_VO, wdg_timeout);
+				      IWL_MVM_OFFCHANNEL_QUEUE,
+				      IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
 		break;
 	case NL80211_IFTYPE_AP:
-		iwl_mvm_enable_ac_txq(mvm, vif->cab_queue,
-				      IWL_MVM_TX_FIFO_MCAST, wdg_timeout);
+		iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
+				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
 		/* fall through */
 	default:
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
 			iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
-					      iwl_mvm_ac_to_tx_fifo[ac],
+					      vif->hw_queue[ac],
+					      iwl_mvm_ac_to_tx_fifo[ac], 0,
 					      wdg_timeout);
 		break;
 	}
@@ -511,14 +513,19 @@
 
 	switch (vif->type) {
 	case NL80211_IFTYPE_P2P_DEVICE:
-		iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, 0);
+		iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
+				    IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT,
+				    0);
 		break;
 	case NL80211_IFTYPE_AP:
-		iwl_mvm_disable_txq(mvm, vif->cab_queue, 0);
+		iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
+				    IWL_MAX_TID_COUNT, 0);
 		/* fall through */
 	default:
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
-			iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], 0);
+			iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
+					    vif->hw_queue[ac],
+					    IWL_MAX_TID_COUNT, 0);
 	}
 }
 
@@ -1126,9 +1133,9 @@
 	ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
 }
 
-int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
-			    struct ieee80211_vif *vif,
-			    u32 action)
+static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
+				   struct ieee80211_vif *vif,
+				   u32 action)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mac_ctx_cmd cmd = {};
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 4c497fe..8443e14 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -820,7 +820,7 @@
 				    struct ieee80211_vif *vif,
 				    enum ieee80211_ampdu_mlme_action action,
 				    struct ieee80211_sta *sta, u16 tid,
-				    u16 *ssn, u8 buf_size)
+				    u16 *ssn, u8 buf_size, bool amsdu)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	int ret;
@@ -2616,7 +2616,7 @@
 
 	if (mvm_sta->vif->type == NL80211_IFTYPE_AP) {
 		mvmvif->ap_assoc_sta_count--;
-		iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, FW_CTXT_ACTION_MODIFY);
+		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
 	}
 
 	mutex_unlock(&mvm->mutex);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index cf1f514..0d3aff1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -82,7 +82,6 @@
 #include "constants.h"
 #include "tof.h"
 
-#define IWL_INVALID_MAC80211_QUEUE	0xff
 #define IWL_MVM_MAX_ADDRESSES		5
 /* RSSI offset for WkP */
 #define IWL_RSSI_OFFSET 50
@@ -605,7 +604,14 @@
 		u64 on_time_scan;
 	} radio_stats, accu_radio_stats;
 
-	u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
+	struct {
+		/* Map to HW queue */
+		u32 hw_queue_to_mac80211;
+		u8 hw_queue_refcount;
+		bool setup_reserved;
+		u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+	} queue_info[IWL_MAX_HW_QUEUES];
+	spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
 	atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
 
 	const char *nvm_file_name;
@@ -682,6 +688,7 @@
 	struct debugfs_blob_wrapper nvm_sw_blob;
 	struct debugfs_blob_wrapper nvm_calib_blob;
 	struct debugfs_blob_wrapper nvm_prod_blob;
+	struct debugfs_blob_wrapper nvm_phy_sku_blob;
 
 	struct iwl_mvm_frame_stats drv_rx_stats;
 	spinlock_t drv_stats_lock;
@@ -910,6 +917,12 @@
 			   IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
 }
 
+static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
+}
+
 static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 {
 	bool nvm_lar = mvm->nvm_data->lar_enabled;
@@ -957,6 +970,12 @@
 			   IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
 }
 
+static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
+{
+	/* firmware flag isn't defined yet */
+	return false;
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 
 struct iwl_rate_info {
@@ -1129,10 +1148,6 @@
 				    struct ieee80211_vif *vif);
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
 					 struct ieee80211_vif *exclude_vif);
-int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
-			    struct ieee80211_vif *vif,
-			    u32 action);
-
 /* Bindings */
 int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -1345,14 +1360,20 @@
 }
 
 /* hw scheduler queue config */
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
-			const struct iwl_trans_txq_scd_cfg *cfg,
+void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
 			unsigned int wdg_timeout);
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags);
+/*
+ * Disable a TXQ.
+ * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
+ */
+void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			 u8 tid, u8 flags);
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq);
 
 static inline
-void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
-			   u8 fifo, unsigned int wdg_timeout)
+void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			   u8 fifo, u16 ssn, unsigned int wdg_timeout)
 {
 	struct iwl_trans_txq_scd_cfg cfg = {
 		.fifo = fifo,
@@ -1361,13 +1382,13 @@
 		.frame_limit = IWL_FRAME_LIMIT,
 	};
 
-	iwl_mvm_enable_txq(mvm, queue, 0, &cfg, wdg_timeout);
+	iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
 }
 
 static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
-					  int fifo, int sta_id, int tid,
-					  int frame_limit, u16 ssn,
-					  unsigned int wdg_timeout)
+					  int mac80211_queue, int fifo,
+					  int sta_id, int tid, int frame_limit,
+					  u16 ssn, unsigned int wdg_timeout)
 {
 	struct iwl_trans_txq_scd_cfg cfg = {
 		.fifo = fifo,
@@ -1377,7 +1398,7 @@
 		.aggregate = true,
 	};
 
-	iwl_mvm_enable_txq(mvm, queue, ssn, &cfg, wdg_timeout);
+	iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
 }
 
 /* Thermal management and CT-kill */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 328187d..4e4a680 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -316,7 +316,8 @@
 	return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
 				  regulatory, mac_override, phy_sku,
 				  mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
-				  lar_enabled, mac_addr0, mac_addr1);
+				  lar_enabled, mac_addr0, mac_addr1,
+				  mvm->trans->hw_id);
 }
 
 #define MAX_NVM_FILE_LEN	16384
@@ -563,6 +564,10 @@
 				mvm->nvm_prod_blob.data = temp;
 				mvm->nvm_prod_blob.size  = ret;
 				break;
+			case NVM_SECTION_TYPE_PHY_SKU:
+				mvm->nvm_phy_sku_blob.data = temp;
+				mvm->nvm_phy_sku_blob.size  = ret;
+				break;
 			default:
 				if (section == mvm->cfg->nvm_hw_section_num) {
 					mvm->nvm_hw_blob.data = temp;
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 3f6428c..064c100 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -89,6 +89,7 @@
 MODULE_LICENSE("GPL");
 
 static const struct iwl_op_mode_ops iwl_mvm_ops;
+static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
 
 struct iwl_mvm_mod_params iwlmvm_mod_params = {
 	.power_scheme = IWL_POWER_SCHEME_BPS,
@@ -222,7 +223,6 @@
  * called from a worker with mvm->mutex held.
  */
 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
-	RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
 	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
 	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
 
@@ -257,6 +257,8 @@
 	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
 		   iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
 	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
+	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
+		       iwl_mvm_temp_notif, true),
 
 	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
 		   true),
@@ -423,7 +425,6 @@
 		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
 
 	op_mode = hw->priv;
-	op_mode->ops = &iwl_mvm_ops;
 
 	mvm = IWL_OP_MODE_GET_MVM(op_mode);
 	mvm->dev = trans->dev;
@@ -432,6 +433,15 @@
 	mvm->fw = fw;
 	mvm->hw = hw;
 
+	if (iwl_mvm_has_new_rx_api(mvm)) {
+		op_mode->ops = &iwl_mvm_ops_mq;
+	} else {
+		op_mode->ops = &iwl_mvm_ops;
+
+		if (WARN_ON(trans->num_rx_queues > 1))
+			goto out_free;
+	}
+
 	mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
 
 	mvm->aux_queue = 15;
@@ -452,6 +462,7 @@
 	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
 	INIT_LIST_HEAD(&mvm->async_handlers_list);
 	spin_lock_init(&mvm->time_event_lock);
+	spin_lock_init(&mvm->queue_info_lock);
 
 	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
 	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
@@ -717,18 +728,11 @@
 	}
 }
 
-static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
-				struct napi_struct *napi,
-				struct iwl_rx_cmd_buffer *rxb)
+static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb,
+			      struct iwl_rx_packet *pkt)
 {
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	u8 i;
-
-	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
-		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
-		return;
-	}
+	int i;
 
 	iwl_mvm_rx_check_trigger(mvm, pkt);
 
@@ -768,40 +772,84 @@
 	}
 }
 
+static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
+		       struct napi_struct *napi,
+		       struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
+		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+	else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
+		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
+	else
+		iwl_mvm_rx_common(mvm, rxb, pkt);
+}
+
+static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
+			  struct napi_struct *napi,
+			  struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
+		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+	else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
+		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
+	else
+		iwl_mvm_rx_common(mvm, rxb, pkt);
+}
+
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	int mq = mvm->queue_to_mac80211[queue];
+	unsigned long mq;
+	int q;
 
-	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+	spin_lock_bh(&mvm->queue_info_lock);
+	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	if (WARN_ON_ONCE(!mq))
 		return;
 
-	if (atomic_inc_return(&mvm->mac80211_queue_stop_count[mq]) > 1) {
-		IWL_DEBUG_TX_QUEUES(mvm,
-				    "queue %d (mac80211 %d) already stopped\n",
-				    queue, mq);
-		return;
+	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "queue %d (mac80211 %d) already stopped\n",
+					    queue, q);
+			continue;
+		}
+
+		ieee80211_stop_queue(mvm->hw, q);
 	}
-
-	ieee80211_stop_queue(mvm->hw, mq);
 }
 
 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	int mq = mvm->queue_to_mac80211[queue];
+	unsigned long mq;
+	int q;
 
-	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+	spin_lock_bh(&mvm->queue_info_lock);
+	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	if (WARN_ON_ONCE(!mq))
 		return;
 
-	if (atomic_dec_return(&mvm->mac80211_queue_stop_count[mq]) > 0) {
-		IWL_DEBUG_TX_QUEUES(mvm,
-				    "queue %d (mac80211 %d) still stopped\n",
-				    queue, mq);
-		return;
+	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "queue %d (mac80211 %d) still stopped\n",
+					    queue, q);
+			continue;
+		}
+
+		ieee80211_wake_queue(mvm->hw, q);
 	}
-
-	ieee80211_wake_queue(mvm->hw, mq);
 }
 
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
@@ -1347,17 +1395,38 @@
 	return _iwl_mvm_exit_d0i3(mvm);
 }
 
+#define IWL_MVM_COMMON_OPS					\
+	/* these could be differentiated */			\
+	.queue_full = iwl_mvm_stop_sw_queue,			\
+	.queue_not_full = iwl_mvm_wake_sw_queue,		\
+	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
+	.free_skb = iwl_mvm_free_skb,				\
+	.nic_error = iwl_mvm_nic_error,				\
+	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
+	.nic_config = iwl_mvm_nic_config,			\
+	.enter_d0i3 = iwl_mvm_enter_d0i3,			\
+	.exit_d0i3 = iwl_mvm_exit_d0i3,				\
+	/* as we only register one, these MUST be common! */	\
+	.start = iwl_op_mode_mvm_start,				\
+	.stop = iwl_op_mode_mvm_stop
+
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
-	.start = iwl_op_mode_mvm_start,
-	.stop = iwl_op_mode_mvm_stop,
-	.rx = iwl_mvm_rx_dispatch,
-	.queue_full = iwl_mvm_stop_sw_queue,
-	.queue_not_full = iwl_mvm_wake_sw_queue,
-	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,
-	.free_skb = iwl_mvm_free_skb,
-	.nic_error = iwl_mvm_nic_error,
-	.cmd_queue_full = iwl_mvm_cmd_queue_full,
-	.nic_config = iwl_mvm_nic_config,
-	.enter_d0i3 = iwl_mvm_enter_d0i3,
-	.exit_d0i3 = iwl_mvm_exit_d0i3,
+	IWL_MVM_COMMON_OPS,
+	.rx = iwl_mvm_rx,
+};
+
+static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
+			      struct napi_struct *napi,
+			      struct iwl_rx_cmd_buffer *rxb,
+			      unsigned int queue)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+	iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+}
+
+static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
+	IWL_MVM_COMMON_OPS,
+	.rx = iwl_mvm_rx_mq,
+	.rx_rss = iwl_mvm_rx_mq_rss,
 };
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 4645877..723b537 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -306,13 +308,50 @@
 	return radar_detect;
 }
 
+static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
+					   struct ieee80211_vif *vif,
+					   struct iwl_mac_power_cmd *cmd,
+					   bool host_awake)
+{
+	int dtimper = vif->bss_conf.dtim_period ?: 1;
+	int skip;
+
+	/* disable, in case we're supposed to override */
+	cmd->skip_dtim_periods = 0;
+	cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+
+	if (iwl_mvm_power_is_radar(vif))
+		return;
+
+	if (dtimper >= 10)
+		return;
+
+	/* TODO: check that multicast wake lock is off */
+
+	if (host_awake) {
+		if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
+			return;
+		skip = 2;
+	} else {
+		int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
+
+		if (WARN_ON(!dtimper_tu))
+			return;
+		/* configure skip over dtim up to 306TU - 314 msec */
+		skip = max_t(u8, 1, 306 / dtimper_tu);
+	}
+
+	/* the firmware really expects "look at every X DTIMs", so add 1 */
+	cmd->skip_dtim_periods = 1 + skip;
+	cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+}
+
 static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif,
 				    struct iwl_mac_power_cmd *cmd)
 {
 	int dtimper, bi;
 	int keep_alive;
-	bool radar_detect = false;
 	struct iwl_mvm_vif *mvmvif __maybe_unused =
 		iwl_mvm_vif_from_mac80211(vif);
 
@@ -350,16 +389,8 @@
 		cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
 	}
 
-	/* Check if radar detection is required on current channel */
-	radar_detect = iwl_mvm_power_is_radar(vif);
-
-	/* Check skip over DTIM conditions */
-	if (!radar_detect && (dtimper < 10) &&
-	    (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
-	     mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
-		cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
-		cmd->skip_dtim_periods = 3;
-	}
+	iwl_mvm_power_config_skip_dtim(mvm, vif, cmd,
+				       mvm->cur_ucode != IWL_UCODE_WOWLAN);
 
 	if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
 		cmd->rx_data_timeout =
@@ -440,14 +471,14 @@
 int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
 {
 	struct iwl_device_power_cmd cmd = {
-		.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
+		.flags = 0,
 	};
 
 	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
 		mvm->ps_disabled = true;
 
-	if (mvm->ps_disabled)
-		cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
+	if (!mvm->ps_disabled)
+		cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 :
@@ -964,24 +995,11 @@
 		return 0;
 
 	iwl_mvm_power_build_cmd(mvm, vif, &cmd);
-	if (enable) {
-		/* configure skip over dtim up to 306TU - 314 msec */
-		int dtimper = vif->bss_conf.dtim_period ?: 1;
-		int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
-		bool radar_detect = iwl_mvm_power_is_radar(vif);
 
-		if (WARN_ON(!dtimper_tu))
-			return 0;
+	/* when enabling D0i3, override the skip-over-dtim configuration */
+	if (enable)
+		iwl_mvm_power_config_skip_dtim(mvm, vif, &cmd, false);
 
-		/* Check skip over DTIM conditions */
-		/* TODO: check that multicast wake lock is off */
-		if (!radar_detect && (dtimper < 10)) {
-			cmd.skip_dtim_periods = 306 / dtimper_tu;
-			if (cmd.skip_dtim_periods)
-				cmd.flags |= cpu_to_le16(
-					POWER_FLAGS_SKIP_OVER_DTIM_MSK);
-		}
-	}
 	iwl_mvm_power_log(mvm, &cmd);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 5ae9c8a..34d98b2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -524,14 +524,56 @@
 	return lq_types[type];
 }
 
+static char *rs_pretty_rate(const struct rs_rate *rate)
+{
+	static char buf[40];
+	static const char * const legacy_rates[] = {
+		[IWL_RATE_1M_INDEX] = "1M",
+		[IWL_RATE_2M_INDEX] = "2M",
+		[IWL_RATE_5M_INDEX] = "5.5M",
+		[IWL_RATE_11M_INDEX] = "11M",
+		[IWL_RATE_6M_INDEX] = "6M",
+		[IWL_RATE_9M_INDEX] = "9M",
+		[IWL_RATE_12M_INDEX] = "12M",
+		[IWL_RATE_18M_INDEX] = "18M",
+		[IWL_RATE_24M_INDEX] = "24M",
+		[IWL_RATE_36M_INDEX] = "36M",
+		[IWL_RATE_48M_INDEX] = "48M",
+		[IWL_RATE_54M_INDEX] = "54M",
+	};
+	static const char *const ht_vht_rates[] = {
+		[IWL_RATE_MCS_0_INDEX] = "MCS0",
+		[IWL_RATE_MCS_1_INDEX] = "MCS1",
+		[IWL_RATE_MCS_2_INDEX] = "MCS2",
+		[IWL_RATE_MCS_3_INDEX] = "MCS3",
+		[IWL_RATE_MCS_4_INDEX] = "MCS4",
+		[IWL_RATE_MCS_5_INDEX] = "MCS5",
+		[IWL_RATE_MCS_6_INDEX] = "MCS6",
+		[IWL_RATE_MCS_7_INDEX] = "MCS7",
+		[IWL_RATE_MCS_8_INDEX] = "MCS8",
+		[IWL_RATE_MCS_9_INDEX] = "MCS9",
+	};
+	const char *rate_str;
+
+	if (is_type_legacy(rate->type))
+		rate_str = legacy_rates[rate->index];
+	else if (is_type_ht(rate->type) || is_type_vht(rate->type))
+		rate_str = ht_vht_rates[rate->index];
+	else
+		rate_str = "BAD_RATE";
+
+	sprintf(buf, "(%s|%s|%s)", rs_pretty_lq_type(rate->type),
+		rs_pretty_ant(rate->ant), rate_str);
+	return buf;
+}
+
 static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
 				const char *prefix)
 {
 	IWL_DEBUG_RATE(mvm,
-		       "%s: (%s: %d) ANT: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
-		       prefix, rs_pretty_lq_type(rate->type),
-		       rate->index, rs_pretty_ant(rate->ant),
-		       rate->bw, rate->sgi, rate->ldpc, rate->stbc);
+		       "%s: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
+		       prefix, rs_pretty_rate(rate), rate->bw,
+		       rate->sgi, rate->ldpc, rate->stbc);
 }
 
 static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
@@ -562,8 +604,8 @@
 }
 
 static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
-				      struct iwl_lq_sta *lq_data, u8 tid,
-				      struct ieee80211_sta *sta)
+				     struct iwl_lq_sta *lq_data, u8 tid,
+				     struct ieee80211_sta *sta)
 {
 	int ret = -EAGAIN;
 
@@ -1485,7 +1527,7 @@
 	u32 target_tpt;
 	int rate_idx;
 
-	if (success_ratio > IWL_MVM_RS_SR_NO_DECREASE) {
+	if (success_ratio >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
 		target_tpt = 100 * expected_current_tpt;
 		IWL_DEBUG_RATE(mvm,
 			       "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
@@ -1493,7 +1535,7 @@
 	} else {
 		target_tpt = lq_sta->last_tpt;
 		IWL_DEBUG_RATE(mvm,
-			       "SR %d not thag good. Find rate exceeding ACTUAL_TPT %d\n",
+			       "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n",
 			       success_ratio, target_tpt);
 	}
 
@@ -1622,6 +1664,51 @@
 	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
 }
 
+static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
+			      struct ieee80211_sta *sta,
+			      struct iwl_lq_sta *lq_sta,
+			      struct iwl_scale_tbl_info *tbl,
+			      enum rs_action scale_action)
+{
+	if (sta->bandwidth != IEEE80211_STA_RX_BW_80)
+		return false;
+
+	if (!is_vht_siso(&tbl->rate))
+		return false;
+
+	if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_80) &&
+	    (tbl->rate.index == IWL_RATE_MCS_0_INDEX) &&
+	    (scale_action == RS_ACTION_DOWNSCALE)) {
+		tbl->rate.bw = RATE_MCS_CHAN_WIDTH_20;
+		tbl->rate.index = IWL_RATE_MCS_4_INDEX;
+		IWL_DEBUG_RATE(mvm, "Switch 80Mhz SISO MCS0 -> 20Mhz MCS4\n");
+		goto tweaked;
+	}
+
+	/* Go back to 80Mhz MCS1 only if we've established that 20Mhz MCS5 is
+	 * sustainable, i.e. we're past the test window. We can't go back
+	 * while MCS5 is still being tested, since that always happens right
+	 * after switching to 20Mhz MCS4 because the rate stats are cleared.
+	 */
+	if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_20) &&
+	    (((tbl->rate.index == IWL_RATE_MCS_5_INDEX) &&
+	     (scale_action == RS_ACTION_STAY)) ||
+	     ((tbl->rate.index > IWL_RATE_MCS_5_INDEX) &&
+	      (scale_action == RS_ACTION_UPSCALE)))) {
+		tbl->rate.bw = RATE_MCS_CHAN_WIDTH_80;
+		tbl->rate.index = IWL_RATE_MCS_1_INDEX;
+		IWL_DEBUG_RATE(mvm, "Switch 20Mhz SISO MCS5 -> 80Mhz MCS1\n");
+		goto tweaked;
+	}
+
+	return false;
+
+tweaked:
+	rs_set_expected_tpt_table(lq_sta, tbl);
+	rs_rate_scale_clear_tbl_windows(mvm, tbl);
+	return true;
+}
+
 static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
 					 struct iwl_lq_sta *lq_sta,
 					 struct ieee80211_sta *sta,
@@ -2174,9 +2261,9 @@
 	if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) &&
 	    (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) {
 		IWL_DEBUG_RATE(mvm,
-			       "(%s: %d): Test Window: succ %d total %d\n",
-			       rs_pretty_lq_type(rate->type),
-			       index, window->success_counter, window->counter);
+			       "%s: Test Window: succ %d total %d\n",
+			       rs_pretty_rate(rate),
+			       window->success_counter, window->counter);
 
 		/* Can't calculate this yet; not enough history */
 		window->average_tpt = IWL_INVALID_VALUE;
@@ -2253,8 +2340,8 @@
 		high_tpt = tbl->win[high].average_tpt;
 
 	IWL_DEBUG_RATE(mvm,
-		       "(%s: %d): cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
-		       rs_pretty_lq_type(rate->type), index, current_tpt, sr,
+		       "%s: cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
+		       rs_pretty_rate(rate), current_tpt, sr,
 		       low, high, low_tpt, high_tpt);
 
 	scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
@@ -2305,6 +2392,8 @@
 	/* Replace uCode's rate table for the destination station. */
 	if (update_lq) {
 		tbl->rate.index = index;
+		if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK)
+			rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
 		rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
 	}
 
@@ -2542,7 +2631,6 @@
 		}
 	}
 
-	rs_dump_rate(mvm, rate, "OPTIMAL RATE");
 	return rate;
 }
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 0a6d47c..5b58f53 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -346,8 +346,8 @@
 	/* This is fine since we don't support multiple AP interfaces */
 	sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
 	if (sta) {
-		struct iwl_mvm_sta *mvmsta;
-		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
 		rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
 
 		if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index fe2f538..a9a3eb6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -234,7 +234,9 @@
 	/* Found a place for all queues - enable them */
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
-				      iwl_mvm_ac_to_tx_fifo[ac], wdg_timeout);
+				      mvmsta->hw_queue[ac],
+				      iwl_mvm_ac_to_tx_fifo[ac], 0,
+				      wdg_timeout);
 		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
 	}
 
@@ -253,7 +255,7 @@
 	/* disable the TDLS STA-specific queues */
 	sta_msk = mvmsta->tfd_queue_msk;
 	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
-		iwl_mvm_disable_txq(mvm, i, 0);
+		iwl_mvm_disable_txq(mvm, i, i, 0, 0);
 }
 
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
@@ -277,7 +279,7 @@
 
 	if (vif->type == NL80211_IFTYPE_AP) {
 		mvmvif->ap_assoc_sta_count++;
-		iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, FW_CTXT_ACTION_MODIFY);
+		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
 	}
 
 	spin_lock_init(&mvm_sta->lock);
@@ -292,7 +294,7 @@
 
 	/* HW restart, don't assume the memory has been zeroed */
 	atomic_set(&mvm->pending_frames[sta_id], 0);
-	mvm_sta->tid_disable_agg = 0;
+	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
 	mvm_sta->tfd_queue_msk = 0;
 
 	/* allocate new queues for a TDLS station */
@@ -472,7 +474,7 @@
 			unsigned long i, msk = mvm->tfd_drained[sta_id];
 
 			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
-				iwl_mvm_disable_txq(mvm, i, 0);
+				iwl_mvm_disable_txq(mvm, i, i, 0, 0);
 
 			mvm->tfd_drained[sta_id] = 0;
 			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
@@ -651,8 +653,8 @@
 	lockdep_assert_held(&mvm->mutex);
 
 	/* Map Aux queue to fifo - needs to happen before adding Aux station */
-	iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue,
-			      IWL_MVM_TX_FIFO_MCAST, wdg_timeout);
+	iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
+			      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
 
 	/* Allocate aux station and assign to it the aux queue */
 	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
@@ -923,6 +925,7 @@
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_tid_data *tid_data;
 	int txq_id;
+	int ret;
 
 	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
 		return -EINVAL;
@@ -935,17 +938,6 @@
 
 	lockdep_assert_held(&mvm->mutex);
 
-	for (txq_id = mvm->first_agg_queue;
-	     txq_id <= mvm->last_agg_queue; txq_id++)
-		if (mvm->queue_to_mac80211[txq_id] ==
-		    IWL_INVALID_MAC80211_QUEUE)
-			break;
-
-	if (txq_id > mvm->last_agg_queue) {
-		IWL_ERR(mvm, "Failed to allocate agg queue\n");
-		return -EIO;
-	}
-
 	spin_lock_bh(&mvmsta->lock);
 
 	/* possible race condition - we entered D0i3 while starting agg */
@@ -955,8 +947,18 @@
 		return -EIO;
 	}
 
-	/* the new tx queue is still connected to the same mac80211 queue */
-	mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
+					 mvm->last_agg_queue);
+	if (txq_id < 0) {
+		ret = txq_id;
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm, "Failed to allocate agg queue\n");
+		goto release_locks;
+	}
+	mvm->queue_info[txq_id].setup_reserved = true;
+	spin_unlock_bh(&mvm->queue_info_lock);
 
 	tid_data = &mvmsta->tid_data[tid];
 	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
@@ -975,9 +977,12 @@
 		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
 	}
 
+	ret = 0;
+
+release_locks:
 	spin_unlock_bh(&mvmsta->lock);
 
-	return 0;
+	return ret;
 }
 
 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -1005,13 +1010,19 @@
 
 	fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
-	iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
-			       buf_size, ssn, wdg_timeout);
+	iwl_mvm_enable_agg_txq(mvm, queue,
+			       vif->hw_queue[tid_to_mac80211_ac[tid]], fifo,
+			       mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout);
 
 	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
 	if (ret)
 		return -EIO;
 
+	/* No need to mark as reserved */
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[queue].setup_reserved = false;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	/*
 	 * Even though in theory the peer could have different
 	 * aggregation reorder buffer sizes for different sessions,
@@ -1056,6 +1067,11 @@
 
 	mvmsta->agg_tids &= ~BIT(tid);
 
+	/* No need to mark as reserved anymore */
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[txq_id].setup_reserved = false;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	switch (tid_data->state) {
 	case IWL_AGG_ON:
 		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
@@ -1073,14 +1089,15 @@
 
 		tid_data->ssn = 0xffff;
 		tid_data->state = IWL_AGG_OFF;
-		mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
 		spin_unlock_bh(&mvmsta->lock);
 
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 
 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 
-		iwl_mvm_disable_txq(mvm, txq_id, 0);
+		iwl_mvm_disable_txq(mvm, txq_id,
+				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
+				    0);
 		return 0;
 	case IWL_AGG_STARTING:
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1091,7 +1108,6 @@
 
 		/* No barriers since we are under mutex */
 		lockdep_assert_held(&mvm->mutex);
-		mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
 
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		tid_data->state = IWL_AGG_OFF;
@@ -1132,6 +1148,11 @@
 	mvmsta->agg_tids &= ~BIT(tid);
 	spin_unlock_bh(&mvmsta->lock);
 
+	/* No need to mark as reserved */
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[txq_id].setup_reserved = false;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	if (old_state >= IWL_AGG_ON) {
 		iwl_mvm_drain_sta(mvm, mvmsta, true);
 		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
@@ -1142,12 +1163,11 @@
 
 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 
-		iwl_mvm_disable_txq(mvm, tid_data->txq_id, 0);
+		iwl_mvm_disable_txq(mvm, tid_data->txq_id,
+				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
+				    0);
 	}
 
-	mvm->queue_to_mac80211[tid_data->txq_id] =
-				IWL_INVALID_MAC80211_QUEUE;
-
 	return 0;
 }
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.c b/drivers/net/wireless/iwlwifi/mvm/tof.c
index 7ae0bd8..4007f1d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tof.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tof.c
@@ -185,6 +185,7 @@
 	}
 
 	cmd->sta_id = mvmvif->bcast_sta.sta_id;
+	memcpy(cmd->bssid, vif->addr, ETH_ALEN);
 	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
 						    IWL_ALWAYS_LONG_GROUP, 0),
 				    0, sizeof(*cmd), cmd);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.h b/drivers/net/wireless/iwlwifi/mvm/tof.h
index 50ae8ad..9beebc3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tof.h
+++ b/drivers/net/wireless/iwlwifi/mvm/tof.h
@@ -60,7 +60,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
-#ifndef __tof
+#ifndef __tof_h__
 #define __tof_h__
 
 #include "fw-api-tof.h"
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index fe7145c..58b762f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -176,17 +176,27 @@
 	struct iwl_dts_measurement_cmd cmd = {
 		.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
 	};
+	u32 cmdid;
 
-	return iwl_mvm_send_cmd_pdu(mvm, CMD_DTS_MEASUREMENT_TRIGGER, 0,
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
+		cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+				   PHY_OPS_GROUP, 0);
+	else
+		cmdid = CMD_DTS_MEASUREMENT_TRIGGER;
+	return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0,
 				    sizeof(cmd), &cmd);
 }
 
 int iwl_mvm_get_temp(struct iwl_mvm *mvm)
 {
 	struct iwl_notification_wait wait_temp_notif;
-	static const u16 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
+	static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
+					    DTS_MEASUREMENT_NOTIF_WIDE) };
 	int ret, temp;
 
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
+		temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION;
+
 	lockdep_assert_held(&mvm->mutex);
 
 	iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 6df5aad..ff8b9bd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -560,15 +560,10 @@
 		IWL_DEBUG_TX_QUEUES(mvm,
 				    "Can continue DELBA flow ssn = next_recl = %d\n",
 				    tid_data->next_reclaimed);
-		iwl_mvm_disable_txq(mvm, tid_data->txq_id, CMD_ASYNC);
+		iwl_mvm_disable_txq(mvm, tid_data->txq_id,
+				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
+				    CMD_ASYNC);
 		tid_data->state = IWL_AGG_OFF;
-		/*
-		 * we can't hold the mutex - but since we are after a sequence
-		 * point (call to iwl_mvm_disable_txq(), so we don't even need
-		 * a memory barrier.
-		 */
-		mvm->queue_to_mac80211[tid_data->txq_id] =
-					IWL_INVALID_MAC80211_QUEUE;
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		break;
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 06cba97..ad0f169 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -657,34 +658,143 @@
 	if (mvm->support_umac_log)
 		iwl_mvm_dump_umac_error_log(mvm);
 }
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
-			const struct iwl_trans_txq_scd_cfg *cfg,
-			unsigned int wdg_timeout)
-{
-	struct iwl_scd_txq_cfg_cmd cmd = {
-		.scd_queue = queue,
-		.enable = 1,
-		.window = cfg->frame_limit,
-		.sta_id = cfg->sta_id,
-		.ssn = cpu_to_le16(ssn),
-		.tx_fifo = cfg->fifo,
-		.aggregate = cfg->aggregate,
-		.tid = cfg->tid,
-	};
 
-	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
-	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
-	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
+{
+	int i;
+
+	lockdep_assert_held(&mvm->queue_info_lock);
+
+	for (i = minq; i <= maxq; i++)
+		if (mvm->queue_info[i].hw_queue_refcount == 0 &&
+		    !mvm->queue_info[i].setup_reserved)
+			return i;
+
+	return -ENOSPC;
 }
 
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags)
+void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
+			unsigned int wdg_timeout)
+{
+	bool enable_queue = true;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	/* Make sure this TID isn't already enabled */
+	if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n",
+			cfg->tid);
+		return;
+	}
+
+	/* Update mappings and refcounts */
+	mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
+	mvm->queue_info[queue].hw_queue_refcount++;
+	if (mvm->queue_info[queue].hw_queue_refcount > 1)
+		enable_queue = false;
+	mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
+			    queue, mvm->queue_info[queue].hw_queue_refcount,
+			    mvm->queue_info[queue].hw_queue_to_mac80211);
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/* Send the enabling command if we need to */
+	if (enable_queue) {
+		struct iwl_scd_txq_cfg_cmd cmd = {
+			.scd_queue = queue,
+			.enable = 1,
+			.window = cfg->frame_limit,
+			.sta_id = cfg->sta_id,
+			.ssn = cpu_to_le16(ssn),
+			.tx_fifo = cfg->fifo,
+			.aggregate = cfg->aggregate,
+			.tid = cfg->tid,
+		};
+
+		iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
+					 wdg_timeout);
+		WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
+					  &cmd),
+		     "Failed to configure queue %d on FIFO %d\n", queue,
+		     cfg->fifo);
+	}
+}
+
+void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			 u8 tid, u8 flags)
 {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
 		.enable = 0,
 	};
+	bool remove_mac_queue = true;
 	int ret;
 
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		return;
+	}
+
+	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+	/*
+	 * If there is another TID with the same AC - don't remove the MAC queue
+	 * from the mapping
+	 */
+	if (tid < IWL_MAX_TID_COUNT) {
+		unsigned long tid_bitmap =
+			mvm->queue_info[queue].tid_bitmap;
+		int ac = tid_to_mac80211_ac[tid];
+		int i;
+
+		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
+			if (tid_to_mac80211_ac[i] == ac)
+				remove_mac_queue = false;
+		}
+	}
+
+	if (remove_mac_queue)
+		mvm->queue_info[queue].hw_queue_to_mac80211 &=
+			~BIT(mac80211_queue);
+	mvm->queue_info[queue].hw_queue_refcount--;
+
+	cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
+			    queue,
+			    mvm->queue_info[queue].hw_queue_refcount,
+			    mvm->queue_info[queue].hw_queue_to_mac80211);
+
+	/* If the queue is still enabled - nothing left to do in this func */
+	if (cmd.enable) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		return;
+	}
+
+	/* Make sure queue info is correct even though we overwrite it */
+	WARN(mvm->queue_info[queue].hw_queue_refcount ||
+	     mvm->queue_info[queue].tid_bitmap ||
+	     mvm->queue_info[queue].hw_queue_to_mac80211,
+	     "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
+	     queue, mvm->queue_info[queue].hw_queue_refcount,
+	     mvm->queue_info[queue].hw_queue_to_mac80211,
+	     mvm->queue_info[queue].tid_bitmap);
+
+	/* If we are here - the queue is freed and we can zero out these vals */
+	mvm->queue_info[queue].hw_queue_refcount = 0;
+	mvm->queue_info[queue].tid_bitmap = 0;
+	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	iwl_trans_txq_disable(mvm->trans, queue, false);
 	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
 				   sizeof(cmd), &cmd);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 520bef8..ee46f46 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1819,7 +1819,7 @@
 				       struct ieee80211_vif *vif,
 				       enum ieee80211_ampdu_mlme_action action,
 				       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-				       u8 buf_size)
+				       u8 buf_size, bool amsdu)
 {
 	switch (action) {
 	case IEEE80211_AMPDU_TX_START:
@@ -2190,9 +2190,8 @@
 				   struct genl_info *info)
 {
 	if (info)
-		genl_notify(&hwsim_genl_family, mcast_skb,
-			    genl_info_net(info), info->snd_portid,
-			    HWSIM_MCGRP_CONFIG, info->nlhdr, GFP_KERNEL);
+		genl_notify(&hwsim_genl_family, mcast_skb, info,
+			    HWSIM_MCGRP_CONFIG, GFP_KERNEL);
 	else
 		genlmsg_multicast(&hwsim_genl_family, mcast_skb, 0,
 				  HWSIM_MCGRP_CONFIG, GFP_KERNEL);
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 169384b..f715eee 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -335,7 +335,8 @@
 static int
 mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		  enum ieee80211_ampdu_mlme_action action,
-		  struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size)
+		  struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
+		  bool amsdu)
 {
 	struct mt7601u_dev *dev = hw->priv;
 	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 2c5ffa1..aa498e0 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -173,7 +173,6 @@
 	int pad = 0, aggr_num = 0, ret;
 	struct mwifiex_tx_param tx_param;
 	struct txpd *ptx_pd = NULL;
-	struct timeval tv;
 	int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
 
 	skb_src = skb_peek(&pra_list->skb_head);
@@ -204,8 +203,7 @@
 	skb_aggr->priority = skb_src->priority;
 	skb_aggr->tstamp = skb_src->tstamp;
 
-	do_gettimeofday(&tv);
-	skb_aggr->tstamp = timeval_to_ktime(tv);
+	skb_aggr->tstamp = ktime_get_real();
 
 	do {
 		/* Check if AMSDU can accommodate this MSDU */
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 2906cd5..b3970a8 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -615,10 +615,10 @@
 	    ((end_win > start_win) && ((seq_num > end_win) ||
 				       (seq_num < start_win)))) {
 		end_win = seq_num;
-		if (((seq_num - win_size) + 1) >= 0)
+		if (((end_win - win_size) + 1) >= 0)
 			start_win = (end_win - win_size) + 1;
 		else
-			start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
+			start_win = (MAX_TID_VALUE - (win_size - end_win)) + 1;
 		mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
 	}
 
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 317d991..279167d 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -33,12 +33,12 @@
 	  mwifiex_pcie.
 
 config MWIFIEX_USB
-	tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897/8997"
+	tristate "Marvell WiFi-Ex Driver for USB8766/8797/8997"
 	depends on MWIFIEX && USB
 	select FW_LOADER
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  8797/8897/8997 chipset with USB interface.
+	  8797/8997 chipset with USB interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_usb.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 30cbafb..b7ac45f 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -2374,7 +2374,7 @@
  * CFG802.11 operation handler for scan request.
  *
  * This function issues a scan request to the firmware based upon
- * the user specified scan configuration. On successfull completion,
+ * the user specified scan configuration. On successful completion,
  * it also informs the results.
  */
 static int
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 5583856..9824d8d 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -856,6 +856,56 @@
 	return ret;
 }
 
+static ssize_t
+mwifiex_timeshare_coex_read(struct file *file, char __user *ubuf,
+			    size_t count, loff_t *ppos)
+{
+	struct mwifiex_private *priv = file->private_data;
+	char buf[3];
+	bool timeshare_coex;
+	int ret;
+	unsigned int len;
+
+	if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15)
+		return -EOPNOTSUPP;
+
+	ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX,
+			       HostCmd_ACT_GEN_GET, 0, &timeshare_coex, true);
+	if (ret)
+		return ret;
+
+	len = sprintf(buf, "%d\n", timeshare_coex);
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static ssize_t
+mwifiex_timeshare_coex_write(struct file *file, const char __user *ubuf,
+			     size_t count, loff_t *ppos)
+{
+	bool timeshare_coex;
+	struct mwifiex_private *priv = file->private_data;
+	char kbuf[16];
+	int ret;
+
+	if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15)
+		return -EOPNOTSUPP;
+
+	memset(kbuf, 0, sizeof(kbuf));
+
+	if (copy_from_user(&kbuf, ubuf, min_t(size_t, sizeof(kbuf) - 1, count)))
+		return -EFAULT;
+
+	if (strtobool(kbuf, &timeshare_coex))
+		return -EINVAL;
+
+	ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX,
+			       HostCmd_ACT_GEN_SET, 0, &timeshare_coex, true);
+	if (ret)
+		return ret;
+	else
+		return count;
+}
+
 #define MWIFIEX_DFS_ADD_FILE(name) do {                                 \
 	if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir,        \
 			priv, &mwifiex_dfs_##name##_fops))              \
@@ -892,6 +942,7 @@
 MWIFIEX_DFS_FILE_OPS(hscfg);
 MWIFIEX_DFS_FILE_OPS(histogram);
 MWIFIEX_DFS_FILE_OPS(debug_mask);
+MWIFIEX_DFS_FILE_OPS(timeshare_coex);
 
 /*
  * This function creates the debug FS directory structure and the files.
@@ -918,6 +969,7 @@
 	MWIFIEX_DFS_ADD_FILE(hscfg);
 	MWIFIEX_DFS_ADD_FILE(histogram);
 	MWIFIEX_DFS_ADD_FILE(debug_mask);
+	MWIFIEX_DFS_ADD_FILE(timeshare_coex);
 }
 
 /*
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 0e6f029..1e1e81a 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -101,6 +101,9 @@
 #define FIRMWARE_READY_SDIO				0xfedc
 #define FIRMWARE_READY_PCIE				0xfedcba00
 
+#define MWIFIEX_COEX_MODE_TIMESHARE			0x01
+#define MWIFIEX_COEX_MODE_SPATIAL			0x82
+
 enum mwifiex_usb_ep {
 	MWIFIEX_USB_EP_CMD_EVENT = 1,
 	MWIFIEX_USB_EP_DATA = 2,
@@ -163,6 +166,7 @@
 #define TLV_TYPE_CHANRPT_11H_BASIC  (PROPRIETARY_TLV_BASE_ID + 91)
 #define TLV_TYPE_UAP_RETRY_LIMIT    (PROPRIETARY_TLV_BASE_ID + 93)
 #define TLV_TYPE_WAPI_IE            (PROPRIETARY_TLV_BASE_ID + 94)
+#define TLV_TYPE_ROBUST_COEX        (PROPRIETARY_TLV_BASE_ID + 96)
 #define TLV_TYPE_UAP_MGMT_FRAME     (PROPRIETARY_TLV_BASE_ID + 104)
 #define TLV_TYPE_MGMT_IE            (PROPRIETARY_TLV_BASE_ID + 105)
 #define TLV_TYPE_AUTO_DS_PARAM      (PROPRIETARY_TLV_BASE_ID + 113)
@@ -354,6 +358,7 @@
 #define HostCmd_CMD_AMSDU_AGGR_CTRL                   0x00df
 #define HostCmd_CMD_TXPWR_CFG                         0x00d1
 #define HostCmd_CMD_TX_RATE_CFG                       0x00d6
+#define HostCmd_CMD_ROBUST_COEX                       0x00e0
 #define HostCmd_CMD_802_11_PS_MODE_ENH                0x00e4
 #define HostCmd_CMD_802_11_HS_CFG_ENH                 0x00e5
 #define HostCmd_CMD_P2P_MODE_CFG                      0x00eb
@@ -1877,6 +1882,11 @@
 	u8 reserved;
 } __packed;
 
+struct mwifiex_ie_types_robust_coex {
+	struct mwifiex_ie_types_header header;
+	__le32 mode;
+} __packed;
+
 struct host_cmd_ds_version_ext {
 	u8 version_str_sel;
 	char version_str[128];
@@ -2078,6 +2088,11 @@
 	__le16 policy;
 } __packed;
 
+struct host_cmd_ds_robust_coex {
+	__le16 action;
+	__le16 reserved;
+} __packed;
+
 struct host_cmd_ds_command {
 	__le16 command;
 	__le16 size;
@@ -2147,6 +2162,7 @@
 		struct host_cmd_ds_chan_rpt_req chan_rpt_req;
 		struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
 		struct host_cmd_ds_multi_chan_policy mc_policy;
+		struct host_cmd_ds_robust_coex coex;
 	} params;
 } __packed;
 
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 6e3faa7..969ca1e 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -1199,6 +1199,7 @@
 	.ndo_stop = mwifiex_close,
 	.ndo_start_xmit = mwifiex_hard_start_xmit,
 	.ndo_set_mac_address = mwifiex_set_mac_address,
+	.ndo_validate_addr = eth_validate_addr,
 	.ndo_tx_timeout = mwifiex_tx_timeout,
 	.ndo_get_stats = mwifiex_get_stats,
 	.ndo_set_rx_mode = mwifiex_set_multicast_list,
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 504b321..e486867 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -1531,6 +1531,33 @@
 	return 0;
 }
 
+static int mwifiex_cmd_robust_coex(struct mwifiex_private *priv,
+				   struct host_cmd_ds_command *cmd,
+				   u16 cmd_action, bool *is_timeshare)
+{
+	struct host_cmd_ds_robust_coex *coex = &cmd->params.coex;
+	struct mwifiex_ie_types_robust_coex *coex_tlv;
+
+	cmd->command = cpu_to_le16(HostCmd_CMD_ROBUST_COEX);
+	cmd->size = cpu_to_le16(sizeof(*coex) + sizeof(*coex_tlv) + S_DS_GEN);
+
+	coex->action = cpu_to_le16(cmd_action);
+	coex_tlv = (struct mwifiex_ie_types_robust_coex *)
+				((u8 *)coex + sizeof(*coex));
+	coex_tlv->header.type = cpu_to_le16(TLV_TYPE_ROBUST_COEX);
+	coex_tlv->header.len = cpu_to_le16(sizeof(coex_tlv->mode));
+
+	if (coex->action == HostCmd_ACT_GEN_GET)
+		return 0;
+
+	if (*is_timeshare)
+		coex_tlv->mode = cpu_to_le32(MWIFIEX_COEX_MODE_TIMESHARE);
+	else
+		coex_tlv->mode = cpu_to_le32(MWIFIEX_COEX_MODE_SPATIAL);
+
+	return 0;
+}
+
 static int
 mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
 			 struct host_cmd_ds_command *cmd,
@@ -2040,6 +2067,10 @@
 		ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
 						data_buf);
 		break;
+	case HostCmd_CMD_ROBUST_COEX:
+		ret = mwifiex_cmd_robust_coex(priv, cmd_ptr, cmd_action,
+					      data_buf);
+		break;
 	default:
 		mwifiex_dbg(priv->adapter, ERROR,
 			    "PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index d0961635..9ac7aa2 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -1007,6 +1007,28 @@
 	return 0;
 }
 
+static int mwifiex_ret_robust_coex(struct mwifiex_private *priv,
+				   struct host_cmd_ds_command *resp,
+				   bool *is_timeshare)
+{
+	struct host_cmd_ds_robust_coex *coex = &resp->params.coex;
+	struct mwifiex_ie_types_robust_coex *coex_tlv;
+	u16 action = le16_to_cpu(coex->action);
+	u32 mode;
+
+	coex_tlv = (struct mwifiex_ie_types_robust_coex
+		    *)((u8 *)coex + sizeof(struct host_cmd_ds_robust_coex));
+	if (action == HostCmd_ACT_GEN_GET) {
+		mode = le32_to_cpu(coex_tlv->mode);
+		if (mode == MWIFIEX_COEX_MODE_TIMESHARE)
+			*is_timeshare = true;
+		else
+			*is_timeshare = false;
+	}
+
+	return 0;
+}
+
 /*
  * This function handles the command responses.
  *
@@ -1213,6 +1235,9 @@
 		break;
 	case HostCmd_CMD_TDLS_CONFIG:
 		break;
+	case HostCmd_CMD_ROBUST_COEX:
+		ret = mwifiex_ret_robust_coex(priv, resp, data_buf);
+		break;
 	default:
 		mwifiex_dbg(adapter, ERROR,
 			    "CMD_RESP: unknown cmd response %#x\n",
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 4d5a6e3..759a6ad 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -846,22 +846,6 @@
 {
 	enum state_11d_t state_11d;
 
-	if (mwifiex_del_mgmt_ies(priv))
-		mwifiex_dbg(priv->adapter, ERROR,
-			    "Failed to delete mgmt IEs!\n");
-
-	if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
-			     HostCmd_ACT_GEN_SET, 0, NULL, true)) {
-		mwifiex_dbg(priv->adapter, ERROR, "Failed to stop the BSS\n");
-		return -1;
-	}
-
-	if (mwifiex_send_cmd(priv, HOST_CMD_APCMD_SYS_RESET,
-			     HostCmd_ACT_GEN_SET, 0, NULL, true)) {
-		mwifiex_dbg(priv->adapter, ERROR, "Failed to reset BSS\n");
-		return -1;
-	}
-
 	if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
 			     HostCmd_ACT_GEN_SET,
 			     UAP_BSS_PARAMS_I, bss_cfg, false)) {
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 9f5356e..e43aff9 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -42,11 +42,6 @@
 	{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8801_PID_2,
 				       USB_CLASS_VENDOR_SPEC,
 				       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
-	/* 8897 */
-	{USB_DEVICE(USB8XXX_VID, USB8897_PID_1)},
-	{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
-				       USB_CLASS_VENDOR_SPEC,
-				       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
 	/* 8997 */
 	{USB_DEVICE(USB8XXX_VID, USB8997_PID_1)},
 	{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8997_PID_2,
@@ -403,14 +398,12 @@
 	case USB8766_PID_1:
 	case USB8797_PID_1:
 	case USB8801_PID_1:
-	case USB8897_PID_1:
 	case USB8997_PID_1:
 		card->usb_boot_state = USB8XXX_FW_DNLD;
 		break;
 	case USB8766_PID_2:
 	case USB8797_PID_2:
 	case USB8801_PID_2:
-	case USB8897_PID_2:
 	case USB8997_PID_2:
 		card->usb_boot_state = USB8XXX_FW_READY;
 		break;
@@ -964,12 +957,6 @@
 		strcpy(adapter->fw_name, USB8997_DEFAULT_FW_NAME);
 		adapter->ext_scan = true;
 		break;
-	case USB8897_PID_1:
-	case USB8897_PID_2:
-		adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
-		strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
-		adapter->ext_scan = true;
-		break;
 	case USB8766_PID_1:
 	case USB8766_PID_2:
 		adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
@@ -1277,5 +1264,4 @@
 MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME);
-MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
index bab10ee..b4e9246 100644
--- a/drivers/net/wireless/mwifiex/usb.h
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -28,11 +28,9 @@
 #define USB8766_PID_2		0x2042
 #define USB8797_PID_1		0x2043
 #define USB8797_PID_2		0x2044
-#define USB8897_PID_1		0x2045
-#define USB8897_PID_2		0x2046
 #define USB8801_PID_1		0x2049
 #define USB8801_PID_2		0x204a
-#define USB8997_PID_1		0x204d
+#define USB8997_PID_1		0x2052
 #define USB8997_PID_2		0x204e
 
 
@@ -48,7 +46,6 @@
 #define USB8766_DEFAULT_FW_NAME	"mrvl/usb8766_uapsta.bin"
 #define USB8797_DEFAULT_FW_NAME	"mrvl/usb8797_uapsta.bin"
 #define USB8801_DEFAULT_FW_NAME	"mrvl/usb8801_uapsta.bin"
-#define USB8897_DEFAULT_FW_NAME	"mrvl/usb8897_uapsta.bin"
 #define USB8997_DEFAULT_FW_NAME	"mrvl/usb8997_uapsta.bin"
 
 #define FW_DNLD_TX_BUF_SIZE	620
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 7bbdfe4..acccd67 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -117,22 +117,15 @@
  */
 static u8 mwifiex_get_random_ba_threshold(void)
 {
-	u32 sec, usec;
-	struct timeval ba_tstamp;
-	u8 ba_threshold;
-
+	u64 ns;
 	/* setup ba_packet_threshold here random number between
 	 * [BA_SETUP_PACKET_OFFSET,
 	 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
 	 */
+	ns = ktime_get_ns();
+	ns += (ns >> 32) + (ns >> 16);
 
-	do_gettimeofday(&ba_tstamp);
-	sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
-	usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
-	ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
-						      + BA_SETUP_PACKET_OFFSET;
-
-	return ba_threshold;
+	return ((u8)ns % BA_SETUP_MAX_PACKET_THRESHOLD) + BA_SETUP_PACKET_OFFSET;
 }
 
 /*
@@ -691,7 +684,7 @@
 			if (!memcmp(ra_list->ra, mac, ETH_ALEN))
 				continue;
 
-			if (ra_list && ra_list->tx_paused != tx_pause) {
+			if (ra_list->tx_paused != tx_pause) {
 				pkt_cnt += ra_list->total_pkt_count;
 				ra_list->tx_paused = tx_pause;
 				if (tx_pause)
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 9420fc6..30e3aaa 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -5423,7 +5423,7 @@
 mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		   enum ieee80211_ampdu_mlme_action action,
 		   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-		   u8 buf_size)
+		   u8 buf_size, bool amsdu)
 {
 
 	int i, rc = 0;
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index a9e94b6..0f6ea31 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -220,7 +220,7 @@
 	if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
 		/* Set fragmentation */
 		if (priv->has_mwo) {
-			if (wiphy->frag_threshold < 0)
+			if (wiphy->frag_threshold == -1)
 				frag_value = 0;
 			else {
 				printk(KERN_WARNING "%s: Fixed fragmentation "
@@ -230,7 +230,7 @@
 				frag_value = 1;
 			}
 		} else {
-			if (wiphy->frag_threshold < 0)
+			if (wiphy->frag_threshold == -1)
 				frag_value = 2346;
 			else if ((wiphy->frag_threshold < 257) ||
 				 (wiphy->frag_threshold > 2347))
@@ -252,7 +252,7 @@
 		 * the upper limit.
 		 */
 
-		if (wiphy->rts_threshold < 0)
+		if (wiphy->rts_threshold == -1)
 			rts_value = 2347;
 		else if (wiphy->rts_threshold > 2347)
 			err = -EINVAL;
diff --git a/drivers/net/wireless/realtek/Makefile b/drivers/net/wireless/realtek/Makefile
new file mode 100644
index 0000000..9c78deb
--- /dev/null
+++ b/drivers/net/wireless/realtek/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Linux Wireless network device drivers for Realtek units
+#
+
+obj-$(CONFIG_RTL8180)		+= rtl818x/
+obj-$(CONFIG_RTL8187)		+= rtl818x/
+obj-$(CONFIG_RTLWIFI)		+= rtlwifi/
+obj-$(CONFIG_RTL8XXXU)		+= rtl8xxxu/
+
diff --git a/drivers/net/wireless/rtl818x/Kconfig b/drivers/net/wireless/realtek/rtl818x/Kconfig
similarity index 100%
rename from drivers/net/wireless/rtl818x/Kconfig
rename to drivers/net/wireless/realtek/rtl818x/Kconfig
diff --git a/drivers/net/wireless/rtl818x/Makefile b/drivers/net/wireless/realtek/rtl818x/Makefile
similarity index 100%
rename from drivers/net/wireless/rtl818x/Makefile
rename to drivers/net/wireless/realtek/rtl818x/Makefile
diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
similarity index 69%
rename from drivers/net/wireless/rtl818x/rtl8180/Makefile
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
index 21005bd..2966681 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
@@ -2,4 +2,4 @@
 
 obj-$(CONFIG_RTL8180)	+= rtl818x_pci.o
 
-ccflags-y += -Idrivers/net/wireless/rtl818x
+ccflags-y += -Idrivers/net/wireless/realtek/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/dev.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/grf5101.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/grf5101.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/max2820.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/max2820.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8225.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/sa2400.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/sa2400.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
similarity index 62%
rename from drivers/net/wireless/rtl818x/rtl8187/Makefile
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
index 7b62992..ff07491 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
@@ -2,4 +2,4 @@
 
 obj-$(CONFIG_RTL8187)	+= rtl8187.o
 
-ccflags-y += -Idrivers/net/wireless/rtl818x
+ccflags-y += -Idrivers/net/wireless/realtek/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/dev.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/leds.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/leds.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/leds.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rfkill.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rfkill.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rfkill.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rfkill.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.h
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/realtek/rtl818x/rtl818x.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl818x.h
rename to drivers/net/wireless/realtek/rtl818x/rtl818x.h
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
new file mode 100644
index 0000000..dd4d626
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
@@ -0,0 +1,34 @@
+#
+# RTL8XXXU Wireless LAN device configuration
+#
+config RTL8XXXU
+	tristate "RTL8723AU/RTL8188[CR]U/RTL819[12]CU (mac80211) support"
+	depends on MAC80211 && USB
+	---help---
+	  This is an alternative driver for various Realtek RTL8XXX
+	  parts written to utilize the Linux mac80211 stack.
+	  The driver is known to work with a number of RTL8723AU,
+	  RTL8188CU, RTL8188RU, RTL8191CU, and RTL8192CU devices.
+
+	  This driver is under development and has a limited feature
+	  set. In particular it does not yet support 40MHz channels
+	  and power management. However it should have a smaller
+	  memory footprint than the vendor drivers and benefits
+	  from the in kernel mac80211 stack.
+
+	  It can coexist with drivers from drivers/staging/rtl8723au,
+	  drivers/staging/rtl8192u, and drivers/net/wireless/rtlwifi,
+	  but you will need to control which module you wish to load.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called rtl8xxxu.  If unsure, say N.
+
+config RTL8XXXU_UNTESTED
+	bool "Include support for untested Realtek 8xxx USB devices (EXPERIMENTAL)"
+	depends on RTL8XXXU
+	---help---
+	  This option enables detection of Realtek 8723/8188/8191/8192 WiFi
+	  USB devices which have not been tested directly by the driver
+	  author or reported to be working by third parties.
+
+	  Please report your results!
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Makefile b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
new file mode 100644
index 0000000..5dea3bb
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RTL8XXXU)	+= rtl8xxxu.o
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
new file mode 100644
index 0000000..47e52ad
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
@@ -0,0 +1,5976 @@
+/*
+ * RTL8XXXU mac80211 USB driver
+ *
+ * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
+#define DRIVER_NAME "rtl8xxxu"
+
+static int rtl8xxxu_debug;
+static bool rtl8xxxu_ht40_2g;
+
+MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
+MODULE_DESCRIPTION("RTL8XXXu USB mac80211 Wireless LAN Driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin");
+
+module_param_named(debug, rtl8xxxu_debug, int, 0600);
+MODULE_PARM_DESC(debug, "Set debug mask");
+module_param_named(ht40_2g, rtl8xxxu_ht40_2g, bool, 0600);
+MODULE_PARM_DESC(ht40_2g, "Enable HT40 support on the 2.4GHz band");
+
+#define USB_VENDOR_ID_REALTEK		0x0bda
+/* Minimum IEEE80211_MAX_FRAME_LEN */
+#define RTL_RX_BUFFER_SIZE		IEEE80211_MAX_FRAME_LEN
+#define RTL8XXXU_RX_URBS		32
+#define RTL8XXXU_RX_URB_PENDING_WATER	8
+#define RTL8XXXU_TX_URBS		64
+#define RTL8XXXU_TX_URB_LOW_WATER	25
+#define RTL8XXXU_TX_URB_HIGH_WATER	32
+
+static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
+				  struct rtl8xxxu_rx_urb *rx_urb);
+
+static struct ieee80211_rate rtl8xxxu_rates[] = {
+	{ .bitrate = 10, .hw_value = DESC_RATE_1M, .flags = 0 },
+	{ .bitrate = 20, .hw_value = DESC_RATE_2M, .flags = 0 },
+	{ .bitrate = 55, .hw_value = DESC_RATE_5_5M, .flags = 0 },
+	{ .bitrate = 110, .hw_value = DESC_RATE_11M, .flags = 0 },
+	{ .bitrate = 60, .hw_value = DESC_RATE_6M, .flags = 0 },
+	{ .bitrate = 90, .hw_value = DESC_RATE_9M, .flags = 0 },
+	{ .bitrate = 120, .hw_value = DESC_RATE_12M, .flags = 0 },
+	{ .bitrate = 180, .hw_value = DESC_RATE_18M, .flags = 0 },
+	{ .bitrate = 240, .hw_value = DESC_RATE_24M, .flags = 0 },
+	{ .bitrate = 360, .hw_value = DESC_RATE_36M, .flags = 0 },
+	{ .bitrate = 480, .hw_value = DESC_RATE_48M, .flags = 0 },
+	{ .bitrate = 540, .hw_value = DESC_RATE_54M, .flags = 0 },
+};
+
+static struct ieee80211_channel rtl8xxxu_channels_2g[] = {
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+	  .hw_value = 1, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+	  .hw_value = 2, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+	  .hw_value = 3, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+	  .hw_value = 4, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+	  .hw_value = 5, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+	  .hw_value = 6, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+	  .hw_value = 7, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+	  .hw_value = 8, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+	  .hw_value = 9, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+	  .hw_value = 10, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+	  .hw_value = 11, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+	  .hw_value = 12, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+	  .hw_value = 13, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+	  .hw_value = 14, .max_power = 30 }
+};
+
+static struct ieee80211_supported_band rtl8xxxu_supported_band = {
+	.channels = rtl8xxxu_channels_2g,
+	.n_channels = ARRAY_SIZE(rtl8xxxu_channels_2g),
+	.bitrates = rtl8xxxu_rates,
+	.n_bitrates = ARRAY_SIZE(rtl8xxxu_rates),
+};
+
+static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = {
+	{0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
+	{0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+	{0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
+	{0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05},
+	{0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01},
+	{0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f},
+	{0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72},
+	{0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08},
+	{0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
+	{0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
+	{0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
+	{0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
+	{0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
+	{0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
+	{0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16},
+	{0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00},
+	{0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02},
+	{0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
+	{0x652, 0x20}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e},
+	{0x63f, 0x0e}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43},
+	{0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43},
+	{0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff},
+};
+
+static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
+	{0x800, 0x80040000}, {0x804, 0x00000003},
+	{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+	{0x810, 0x10001331}, {0x814, 0x020c3d10},
+	{0x818, 0x02200385}, {0x81c, 0x00000000},
+	{0x820, 0x01000100}, {0x824, 0x00390004},
+	{0x828, 0x00000000}, {0x82c, 0x00000000},
+	{0x830, 0x00000000}, {0x834, 0x00000000},
+	{0x838, 0x00000000}, {0x83c, 0x00000000},
+	{0x840, 0x00010000}, {0x844, 0x00000000},
+	{0x848, 0x00000000}, {0x84c, 0x00000000},
+	{0x850, 0x00000000}, {0x854, 0x00000000},
+	{0x858, 0x569a569a}, {0x85c, 0x001b25a4},
+	{0x860, 0x66f60110}, {0x864, 0x061f0130},
+	{0x868, 0x00000000}, {0x86c, 0x32323200},
+	{0x870, 0x07000760}, {0x874, 0x22004000},
+	{0x878, 0x00000808}, {0x87c, 0x00000000},
+	{0x880, 0xc0083070}, {0x884, 0x000004d5},
+	{0x888, 0x00000000}, {0x88c, 0xccc000c0},
+	{0x890, 0x00000800}, {0x894, 0xfffffffe},
+	{0x898, 0x40302010}, {0x89c, 0x00706050},
+	{0x900, 0x00000000}, {0x904, 0x00000023},
+	{0x908, 0x00000000}, {0x90c, 0x81121111},
+	{0xa00, 0x00d047c8}, {0xa04, 0x80ff000c},
+	{0xa08, 0x8c838300}, {0xa0c, 0x2e68120f},
+	{0xa10, 0x9500bb78}, {0xa14, 0x11144028},
+	{0xa18, 0x00881117}, {0xa1c, 0x89140f00},
+	{0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
+	{0xa28, 0x00000204}, {0xa2c, 0x00d30000},
+	{0xa70, 0x101fbf00}, {0xa74, 0x00000007},
+	{0xa78, 0x00000900},
+	{0xc00, 0x48071d40}, {0xc04, 0x03a05611},
+	{0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
+	{0xc10, 0x08800000}, {0xc14, 0x40000100},
+	{0xc18, 0x08800000}, {0xc1c, 0x40000100},
+	{0xc20, 0x00000000}, {0xc24, 0x00000000},
+	{0xc28, 0x00000000}, {0xc2c, 0x00000000},
+	{0xc30, 0x69e9ac44}, {0xc34, 0x469652af},
+	{0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
+	{0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
+	{0xc48, 0xec020107}, {0xc4c, 0x007f037f},
+	{0xc50, 0x69543420}, {0xc54, 0x43bc0094},
+	{0xc58, 0x69543420}, {0xc5c, 0x433c0094},
+	{0xc60, 0x00000000}, {0xc64, 0x7112848b},
+	{0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
+	{0xc70, 0x2c7f000d}, {0xc74, 0x018610db},
+	{0xc78, 0x0000001f}, {0xc7c, 0x00b91612},
+	{0xc80, 0x40000100}, {0xc84, 0x20f60000},
+	{0xc88, 0x40000100}, {0xc8c, 0x20200000},
+	{0xc90, 0x00121820}, {0xc94, 0x00000000},
+	{0xc98, 0x00121820}, {0xc9c, 0x00007f7f},
+	{0xca0, 0x00000000}, {0xca4, 0x00000080},
+	{0xca8, 0x00000000}, {0xcac, 0x00000000},
+	{0xcb0, 0x00000000}, {0xcb4, 0x00000000},
+	{0xcb8, 0x00000000}, {0xcbc, 0x28000000},
+	{0xcc0, 0x00000000}, {0xcc4, 0x00000000},
+	{0xcc8, 0x00000000}, {0xccc, 0x00000000},
+	{0xcd0, 0x00000000}, {0xcd4, 0x00000000},
+	{0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
+	{0xce0, 0x00222222}, {0xce4, 0x00000000},
+	{0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
+	{0xd00, 0x00080740}, {0xd04, 0x00020401},
+	{0xd08, 0x0000907f}, {0xd0c, 0x20010201},
+	{0xd10, 0xa0633333}, {0xd14, 0x3333bc43},
+	{0xd18, 0x7a8f5b6b}, {0xd2c, 0xcc979975},
+	{0xd30, 0x00000000}, {0xd34, 0x80608000},
+	{0xd38, 0x00000000}, {0xd3c, 0x00027293},
+	{0xd40, 0x00000000}, {0xd44, 0x00000000},
+	{0xd48, 0x00000000}, {0xd4c, 0x00000000},
+	{0xd50, 0x6437140a}, {0xd54, 0x00000000},
+	{0xd58, 0x00000000}, {0xd5c, 0x30032064},
+	{0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+	{0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+	{0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+	{0xd78, 0x000e3c24}, {0xe00, 0x2a2a2a2a},
+	{0xe04, 0x2a2a2a2a}, {0xe08, 0x03902a2a},
+	{0xe10, 0x2a2a2a2a}, {0xe14, 0x2a2a2a2a},
+	{0xe18, 0x2a2a2a2a}, {0xe1c, 0x2a2a2a2a},
+	{0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
+	{0xe34, 0x10008c1f}, {0xe38, 0x02140102},
+	{0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
+	{0xe44, 0x01004800}, {0xe48, 0xfb000000},
+	{0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
+	{0xe54, 0x10008c1f}, {0xe58, 0x02140102},
+	{0xe5c, 0x28160d05}, {0xe60, 0x00000008},
+	{0xe68, 0x001b25a4}, {0xe6c, 0x631b25a0},
+	{0xe70, 0x631b25a0}, {0xe74, 0x081b25a0},
+	{0xe78, 0x081b25a0}, {0xe7c, 0x081b25a0},
+	{0xe80, 0x081b25a0}, {0xe84, 0x631b25a0},
+	{0xe88, 0x081b25a0}, {0xe8c, 0x631b25a0},
+	{0xed0, 0x631b25a0}, {0xed4, 0x631b25a0},
+	{0xed8, 0x631b25a0}, {0xedc, 0x001b25a0},
+	{0xee0, 0x001b25a0}, {0xeec, 0x6b1b25a0},
+	{0xf14, 0x00000003}, {0xf4c, 0x00000000},
+	{0xf00, 0x00000300},
+	{0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8192cu_phy_2t_init_table[] = {
+	{0x024, 0x0011800f}, {0x028, 0x00ffdb83},
+	{0x800, 0x80040002}, {0x804, 0x00000003},
+	{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+	{0x810, 0x10000330}, {0x814, 0x020c3d10},
+	{0x818, 0x02200385}, {0x81c, 0x00000000},
+	{0x820, 0x01000100}, {0x824, 0x00390004},
+	{0x828, 0x01000100}, {0x82c, 0x00390004},
+	{0x830, 0x27272727}, {0x834, 0x27272727},
+	{0x838, 0x27272727}, {0x83c, 0x27272727},
+	{0x840, 0x00010000}, {0x844, 0x00010000},
+	{0x848, 0x27272727}, {0x84c, 0x27272727},
+	{0x850, 0x00000000}, {0x854, 0x00000000},
+	{0x858, 0x569a569a}, {0x85c, 0x0c1b25a4},
+	{0x860, 0x66e60230}, {0x864, 0x061f0130},
+	{0x868, 0x27272727}, {0x86c, 0x2b2b2b27},
+	{0x870, 0x07000700}, {0x874, 0x22184000},
+	{0x878, 0x08080808}, {0x87c, 0x00000000},
+	{0x880, 0xc0083070}, {0x884, 0x000004d5},
+	{0x888, 0x00000000}, {0x88c, 0xcc0000c0},
+	{0x890, 0x00000800}, {0x894, 0xfffffffe},
+	{0x898, 0x40302010}, {0x89c, 0x00706050},
+	{0x900, 0x00000000}, {0x904, 0x00000023},
+	{0x908, 0x00000000}, {0x90c, 0x81121313},
+	{0xa00, 0x00d047c8}, {0xa04, 0x80ff000c},
+	{0xa08, 0x8c838300}, {0xa0c, 0x2e68120f},
+	{0xa10, 0x9500bb78}, {0xa14, 0x11144028},
+	{0xa18, 0x00881117}, {0xa1c, 0x89140f00},
+	{0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
+	{0xa28, 0x00000204}, {0xa2c, 0x00d30000},
+	{0xa70, 0x101fbf00}, {0xa74, 0x00000007},
+	{0xc00, 0x48071d40}, {0xc04, 0x03a05633},
+	{0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
+	{0xc10, 0x08800000}, {0xc14, 0x40000100},
+	{0xc18, 0x08800000}, {0xc1c, 0x40000100},
+	{0xc20, 0x00000000}, {0xc24, 0x00000000},
+	{0xc28, 0x00000000}, {0xc2c, 0x00000000},
+	{0xc30, 0x69e9ac44}, {0xc34, 0x469652cf},
+	{0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
+	{0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
+	{0xc48, 0xec020107}, {0xc4c, 0x007f037f},
+	{0xc50, 0x69543420}, {0xc54, 0x43bc0094},
+	{0xc58, 0x69543420}, {0xc5c, 0x433c0094},
+	{0xc60, 0x00000000}, {0xc64, 0x5116848b},
+	{0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
+	{0xc70, 0x2c7f000d}, {0xc74, 0x2186115b},
+	{0xc78, 0x0000001f}, {0xc7c, 0x00b99612},
+	{0xc80, 0x40000100}, {0xc84, 0x20f60000},
+	{0xc88, 0x40000100}, {0xc8c, 0xa0e40000},
+	{0xc90, 0x00121820}, {0xc94, 0x00000000},
+	{0xc98, 0x00121820}, {0xc9c, 0x00007f7f},
+	{0xca0, 0x00000000}, {0xca4, 0x00000080},
+	{0xca8, 0x00000000}, {0xcac, 0x00000000},
+	{0xcb0, 0x00000000}, {0xcb4, 0x00000000},
+	{0xcb8, 0x00000000}, {0xcbc, 0x28000000},
+	{0xcc0, 0x00000000}, {0xcc4, 0x00000000},
+	{0xcc8, 0x00000000}, {0xccc, 0x00000000},
+	{0xcd0, 0x00000000}, {0xcd4, 0x00000000},
+	{0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
+	{0xce0, 0x00222222}, {0xce4, 0x00000000},
+	{0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
+	{0xd00, 0x00080740}, {0xd04, 0x00020403},
+	{0xd08, 0x0000907f}, {0xd0c, 0x20010201},
+	{0xd10, 0xa0633333}, {0xd14, 0x3333bc43},
+	{0xd18, 0x7a8f5b6b}, {0xd2c, 0xcc979975},
+	{0xd30, 0x00000000}, {0xd34, 0x80608000},
+	{0xd38, 0x00000000}, {0xd3c, 0x00027293},
+	{0xd40, 0x00000000}, {0xd44, 0x00000000},
+	{0xd48, 0x00000000}, {0xd4c, 0x00000000},
+	{0xd50, 0x6437140a}, {0xd54, 0x00000000},
+	{0xd58, 0x00000000}, {0xd5c, 0x30032064},
+	{0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+	{0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+	{0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+	{0xd78, 0x000e3c24}, {0xe00, 0x2a2a2a2a},
+	{0xe04, 0x2a2a2a2a}, {0xe08, 0x03902a2a},
+	{0xe10, 0x2a2a2a2a}, {0xe14, 0x2a2a2a2a},
+	{0xe18, 0x2a2a2a2a}, {0xe1c, 0x2a2a2a2a},
+	{0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
+	{0xe34, 0x10008c1f}, {0xe38, 0x02140102},
+	{0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
+	{0xe44, 0x01004800}, {0xe48, 0xfb000000},
+	{0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
+	{0xe54, 0x10008c1f}, {0xe58, 0x02140102},
+	{0xe5c, 0x28160d05}, {0xe60, 0x00000010},
+	{0xe68, 0x001b25a4}, {0xe6c, 0x63db25a4},
+	{0xe70, 0x63db25a4}, {0xe74, 0x0c1b25a4},
+	{0xe78, 0x0c1b25a4}, {0xe7c, 0x0c1b25a4},
+	{0xe80, 0x0c1b25a4}, {0xe84, 0x63db25a4},
+	{0xe88, 0x0c1b25a4}, {0xe8c, 0x63db25a4},
+	{0xed0, 0x63db25a4}, {0xed4, 0x63db25a4},
+	{0xed8, 0x63db25a4}, {0xedc, 0x001b25a4},
+	{0xee0, 0x001b25a4}, {0xeec, 0x6fdb25a4},
+	{0xf14, 0x00000003}, {0xf4c, 0x00000000},
+	{0xf00, 0x00000300},
+	{0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8188ru_phy_1t_highpa_table[] = {
+	{0x024, 0x0011800f}, {0x028, 0x00ffdb83},
+	{0x040, 0x000c0004}, {0x800, 0x80040000},
+	{0x804, 0x00000001}, {0x808, 0x0000fc00},
+	{0x80c, 0x0000000a}, {0x810, 0x10005388},
+	{0x814, 0x020c3d10}, {0x818, 0x02200385},
+	{0x81c, 0x00000000}, {0x820, 0x01000100},
+	{0x824, 0x00390204}, {0x828, 0x00000000},
+	{0x82c, 0x00000000}, {0x830, 0x00000000},
+	{0x834, 0x00000000}, {0x838, 0x00000000},
+	{0x83c, 0x00000000}, {0x840, 0x00010000},
+	{0x844, 0x00000000}, {0x848, 0x00000000},
+	{0x84c, 0x00000000}, {0x850, 0x00000000},
+	{0x854, 0x00000000}, {0x858, 0x569a569a},
+	{0x85c, 0x001b25a4}, {0x860, 0x66e60230},
+	{0x864, 0x061f0130}, {0x868, 0x00000000},
+	{0x86c, 0x20202000}, {0x870, 0x03000300},
+	{0x874, 0x22004000}, {0x878, 0x00000808},
+	{0x87c, 0x00ffc3f1}, {0x880, 0xc0083070},
+	{0x884, 0x000004d5}, {0x888, 0x00000000},
+	{0x88c, 0xccc000c0}, {0x890, 0x00000800},
+	{0x894, 0xfffffffe}, {0x898, 0x40302010},
+	{0x89c, 0x00706050}, {0x900, 0x00000000},
+	{0x904, 0x00000023}, {0x908, 0x00000000},
+	{0x90c, 0x81121111}, {0xa00, 0x00d047c8},
+	{0xa04, 0x80ff000c}, {0xa08, 0x8c838300},
+	{0xa0c, 0x2e68120f}, {0xa10, 0x9500bb78},
+	{0xa14, 0x11144028}, {0xa18, 0x00881117},
+	{0xa1c, 0x89140f00}, {0xa20, 0x15160000},
+	{0xa24, 0x070b0f12}, {0xa28, 0x00000104},
+	{0xa2c, 0x00d30000}, {0xa70, 0x101fbf00},
+	{0xa74, 0x00000007}, {0xc00, 0x48071d40},
+	{0xc04, 0x03a05611}, {0xc08, 0x000000e4},
+	{0xc0c, 0x6c6c6c6c}, {0xc10, 0x08800000},
+	{0xc14, 0x40000100}, {0xc18, 0x08800000},
+	{0xc1c, 0x40000100}, {0xc20, 0x00000000},
+	{0xc24, 0x00000000}, {0xc28, 0x00000000},
+	{0xc2c, 0x00000000}, {0xc30, 0x69e9ac44},
+	{0xc34, 0x469652cf}, {0xc38, 0x49795994},
+	{0xc3c, 0x0a97971c}, {0xc40, 0x1f7c403f},
+	{0xc44, 0x000100b7}, {0xc48, 0xec020107},
+	{0xc4c, 0x007f037f}, {0xc50, 0x6954342e},
+	{0xc54, 0x43bc0094}, {0xc58, 0x6954342f},
+	{0xc5c, 0x433c0094}, {0xc60, 0x00000000},
+	{0xc64, 0x5116848b}, {0xc68, 0x47c00bff},
+	{0xc6c, 0x00000036}, {0xc70, 0x2c46000d},
+	{0xc74, 0x018610db}, {0xc78, 0x0000001f},
+	{0xc7c, 0x00b91612}, {0xc80, 0x24000090},
+	{0xc84, 0x20f60000}, {0xc88, 0x24000090},
+	{0xc8c, 0x20200000}, {0xc90, 0x00121820},
+	{0xc94, 0x00000000}, {0xc98, 0x00121820},
+	{0xc9c, 0x00007f7f}, {0xca0, 0x00000000},
+	{0xca4, 0x00000080}, {0xca8, 0x00000000},
+	{0xcac, 0x00000000}, {0xcb0, 0x00000000},
+	{0xcb4, 0x00000000}, {0xcb8, 0x00000000},
+	{0xcbc, 0x28000000}, {0xcc0, 0x00000000},
+	{0xcc4, 0x00000000}, {0xcc8, 0x00000000},
+	{0xccc, 0x00000000}, {0xcd0, 0x00000000},
+	{0xcd4, 0x00000000}, {0xcd8, 0x64b22427},
+	{0xcdc, 0x00766932}, {0xce0, 0x00222222},
+	{0xce4, 0x00000000}, {0xce8, 0x37644302},
+	{0xcec, 0x2f97d40c}, {0xd00, 0x00080740},
+	{0xd04, 0x00020401}, {0xd08, 0x0000907f},
+	{0xd0c, 0x20010201}, {0xd10, 0xa0633333},
+	{0xd14, 0x3333bc43}, {0xd18, 0x7a8f5b6b},
+	{0xd2c, 0xcc979975}, {0xd30, 0x00000000},
+	{0xd34, 0x80608000}, {0xd38, 0x00000000},
+	{0xd3c, 0x00027293}, {0xd40, 0x00000000},
+	{0xd44, 0x00000000}, {0xd48, 0x00000000},
+	{0xd4c, 0x00000000}, {0xd50, 0x6437140a},
+	{0xd54, 0x00000000}, {0xd58, 0x00000000},
+	{0xd5c, 0x30032064}, {0xd60, 0x4653de68},
+	{0xd64, 0x04518a3c}, {0xd68, 0x00002101},
+	{0xd6c, 0x2a201c16}, {0xd70, 0x1812362e},
+	{0xd74, 0x322c2220}, {0xd78, 0x000e3c24},
+	{0xe00, 0x24242424}, {0xe04, 0x24242424},
+	{0xe08, 0x03902024}, {0xe10, 0x24242424},
+	{0xe14, 0x24242424}, {0xe18, 0x24242424},
+	{0xe1c, 0x24242424}, {0xe28, 0x00000000},
+	{0xe30, 0x1000dc1f}, {0xe34, 0x10008c1f},
+	{0xe38, 0x02140102}, {0xe3c, 0x681604c2},
+	{0xe40, 0x01007c00}, {0xe44, 0x01004800},
+	{0xe48, 0xfb000000}, {0xe4c, 0x000028d1},
+	{0xe50, 0x1000dc1f}, {0xe54, 0x10008c1f},
+	{0xe58, 0x02140102}, {0xe5c, 0x28160d05},
+	{0xe60, 0x00000008}, {0xe68, 0x001b25a4},
+	{0xe6c, 0x631b25a0}, {0xe70, 0x631b25a0},
+	{0xe74, 0x081b25a0}, {0xe78, 0x081b25a0},
+	{0xe7c, 0x081b25a0}, {0xe80, 0x081b25a0},
+	{0xe84, 0x631b25a0}, {0xe88, 0x081b25a0},
+	{0xe8c, 0x631b25a0}, {0xed0, 0x631b25a0},
+	{0xed4, 0x631b25a0}, {0xed8, 0x631b25a0},
+	{0xedc, 0x001b25a0}, {0xee0, 0x001b25a0},
+	{0xeec, 0x6b1b25a0}, {0xee8, 0x31555448},
+	{0xf14, 0x00000003}, {0xf4c, 0x00000000},
+	{0xf00, 0x00000300},
+	{0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_standard_table[] = {
+	{0xc78, 0x7b000001}, {0xc78, 0x7b010001},
+	{0xc78, 0x7b020001}, {0xc78, 0x7b030001},
+	{0xc78, 0x7b040001}, {0xc78, 0x7b050001},
+	{0xc78, 0x7a060001}, {0xc78, 0x79070001},
+	{0xc78, 0x78080001}, {0xc78, 0x77090001},
+	{0xc78, 0x760a0001}, {0xc78, 0x750b0001},
+	{0xc78, 0x740c0001}, {0xc78, 0x730d0001},
+	{0xc78, 0x720e0001}, {0xc78, 0x710f0001},
+	{0xc78, 0x70100001}, {0xc78, 0x6f110001},
+	{0xc78, 0x6e120001}, {0xc78, 0x6d130001},
+	{0xc78, 0x6c140001}, {0xc78, 0x6b150001},
+	{0xc78, 0x6a160001}, {0xc78, 0x69170001},
+	{0xc78, 0x68180001}, {0xc78, 0x67190001},
+	{0xc78, 0x661a0001}, {0xc78, 0x651b0001},
+	{0xc78, 0x641c0001}, {0xc78, 0x631d0001},
+	{0xc78, 0x621e0001}, {0xc78, 0x611f0001},
+	{0xc78, 0x60200001}, {0xc78, 0x49210001},
+	{0xc78, 0x48220001}, {0xc78, 0x47230001},
+	{0xc78, 0x46240001}, {0xc78, 0x45250001},
+	{0xc78, 0x44260001}, {0xc78, 0x43270001},
+	{0xc78, 0x42280001}, {0xc78, 0x41290001},
+	{0xc78, 0x402a0001}, {0xc78, 0x262b0001},
+	{0xc78, 0x252c0001}, {0xc78, 0x242d0001},
+	{0xc78, 0x232e0001}, {0xc78, 0x222f0001},
+	{0xc78, 0x21300001}, {0xc78, 0x20310001},
+	{0xc78, 0x06320001}, {0xc78, 0x05330001},
+	{0xc78, 0x04340001}, {0xc78, 0x03350001},
+	{0xc78, 0x02360001}, {0xc78, 0x01370001},
+	{0xc78, 0x00380001}, {0xc78, 0x00390001},
+	{0xc78, 0x003a0001}, {0xc78, 0x003b0001},
+	{0xc78, 0x003c0001}, {0xc78, 0x003d0001},
+	{0xc78, 0x003e0001}, {0xc78, 0x003f0001},
+	{0xc78, 0x7b400001}, {0xc78, 0x7b410001},
+	{0xc78, 0x7b420001}, {0xc78, 0x7b430001},
+	{0xc78, 0x7b440001}, {0xc78, 0x7b450001},
+	{0xc78, 0x7a460001}, {0xc78, 0x79470001},
+	{0xc78, 0x78480001}, {0xc78, 0x77490001},
+	{0xc78, 0x764a0001}, {0xc78, 0x754b0001},
+	{0xc78, 0x744c0001}, {0xc78, 0x734d0001},
+	{0xc78, 0x724e0001}, {0xc78, 0x714f0001},
+	{0xc78, 0x70500001}, {0xc78, 0x6f510001},
+	{0xc78, 0x6e520001}, {0xc78, 0x6d530001},
+	{0xc78, 0x6c540001}, {0xc78, 0x6b550001},
+	{0xc78, 0x6a560001}, {0xc78, 0x69570001},
+	{0xc78, 0x68580001}, {0xc78, 0x67590001},
+	{0xc78, 0x665a0001}, {0xc78, 0x655b0001},
+	{0xc78, 0x645c0001}, {0xc78, 0x635d0001},
+	{0xc78, 0x625e0001}, {0xc78, 0x615f0001},
+	{0xc78, 0x60600001}, {0xc78, 0x49610001},
+	{0xc78, 0x48620001}, {0xc78, 0x47630001},
+	{0xc78, 0x46640001}, {0xc78, 0x45650001},
+	{0xc78, 0x44660001}, {0xc78, 0x43670001},
+	{0xc78, 0x42680001}, {0xc78, 0x41690001},
+	{0xc78, 0x406a0001}, {0xc78, 0x266b0001},
+	{0xc78, 0x256c0001}, {0xc78, 0x246d0001},
+	{0xc78, 0x236e0001}, {0xc78, 0x226f0001},
+	{0xc78, 0x21700001}, {0xc78, 0x20710001},
+	{0xc78, 0x06720001}, {0xc78, 0x05730001},
+	{0xc78, 0x04740001}, {0xc78, 0x03750001},
+	{0xc78, 0x02760001}, {0xc78, 0x01770001},
+	{0xc78, 0x00780001}, {0xc78, 0x00790001},
+	{0xc78, 0x007a0001}, {0xc78, 0x007b0001},
+	{0xc78, 0x007c0001}, {0xc78, 0x007d0001},
+	{0xc78, 0x007e0001}, {0xc78, 0x007f0001},
+	{0xc78, 0x3800001e}, {0xc78, 0x3801001e},
+	{0xc78, 0x3802001e}, {0xc78, 0x3803001e},
+	{0xc78, 0x3804001e}, {0xc78, 0x3805001e},
+	{0xc78, 0x3806001e}, {0xc78, 0x3807001e},
+	{0xc78, 0x3808001e}, {0xc78, 0x3c09001e},
+	{0xc78, 0x3e0a001e}, {0xc78, 0x400b001e},
+	{0xc78, 0x440c001e}, {0xc78, 0x480d001e},
+	{0xc78, 0x4c0e001e}, {0xc78, 0x500f001e},
+	{0xc78, 0x5210001e}, {0xc78, 0x5611001e},
+	{0xc78, 0x5a12001e}, {0xc78, 0x5e13001e},
+	{0xc78, 0x6014001e}, {0xc78, 0x6015001e},
+	{0xc78, 0x6016001e}, {0xc78, 0x6217001e},
+	{0xc78, 0x6218001e}, {0xc78, 0x6219001e},
+	{0xc78, 0x621a001e}, {0xc78, 0x621b001e},
+	{0xc78, 0x621c001e}, {0xc78, 0x621d001e},
+	{0xc78, 0x621e001e}, {0xc78, 0x621f001e},
+	{0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_highpa_table[] = {
+	{0xc78, 0x7b000001}, {0xc78, 0x7b010001},
+	{0xc78, 0x7b020001}, {0xc78, 0x7b030001},
+	{0xc78, 0x7b040001}, {0xc78, 0x7b050001},
+	{0xc78, 0x7b060001}, {0xc78, 0x7b070001},
+	{0xc78, 0x7b080001}, {0xc78, 0x7a090001},
+	{0xc78, 0x790a0001}, {0xc78, 0x780b0001},
+	{0xc78, 0x770c0001}, {0xc78, 0x760d0001},
+	{0xc78, 0x750e0001}, {0xc78, 0x740f0001},
+	{0xc78, 0x73100001}, {0xc78, 0x72110001},
+	{0xc78, 0x71120001}, {0xc78, 0x70130001},
+	{0xc78, 0x6f140001}, {0xc78, 0x6e150001},
+	{0xc78, 0x6d160001}, {0xc78, 0x6c170001},
+	{0xc78, 0x6b180001}, {0xc78, 0x6a190001},
+	{0xc78, 0x691a0001}, {0xc78, 0x681b0001},
+	{0xc78, 0x671c0001}, {0xc78, 0x661d0001},
+	{0xc78, 0x651e0001}, {0xc78, 0x641f0001},
+	{0xc78, 0x63200001}, {0xc78, 0x62210001},
+	{0xc78, 0x61220001}, {0xc78, 0x60230001},
+	{0xc78, 0x46240001}, {0xc78, 0x45250001},
+	{0xc78, 0x44260001}, {0xc78, 0x43270001},
+	{0xc78, 0x42280001}, {0xc78, 0x41290001},
+	{0xc78, 0x402a0001}, {0xc78, 0x262b0001},
+	{0xc78, 0x252c0001}, {0xc78, 0x242d0001},
+	{0xc78, 0x232e0001}, {0xc78, 0x222f0001},
+	{0xc78, 0x21300001}, {0xc78, 0x20310001},
+	{0xc78, 0x06320001}, {0xc78, 0x05330001},
+	{0xc78, 0x04340001}, {0xc78, 0x03350001},
+	{0xc78, 0x02360001}, {0xc78, 0x01370001},
+	{0xc78, 0x00380001}, {0xc78, 0x00390001},
+	{0xc78, 0x003a0001}, {0xc78, 0x003b0001},
+	{0xc78, 0x003c0001}, {0xc78, 0x003d0001},
+	{0xc78, 0x003e0001}, {0xc78, 0x003f0001},
+	{0xc78, 0x7b400001}, {0xc78, 0x7b410001},
+	{0xc78, 0x7b420001}, {0xc78, 0x7b430001},
+	{0xc78, 0x7b440001}, {0xc78, 0x7b450001},
+	{0xc78, 0x7b460001}, {0xc78, 0x7b470001},
+	{0xc78, 0x7b480001}, {0xc78, 0x7a490001},
+	{0xc78, 0x794a0001}, {0xc78, 0x784b0001},
+	{0xc78, 0x774c0001}, {0xc78, 0x764d0001},
+	{0xc78, 0x754e0001}, {0xc78, 0x744f0001},
+	{0xc78, 0x73500001}, {0xc78, 0x72510001},
+	{0xc78, 0x71520001}, {0xc78, 0x70530001},
+	{0xc78, 0x6f540001}, {0xc78, 0x6e550001},
+	{0xc78, 0x6d560001}, {0xc78, 0x6c570001},
+	{0xc78, 0x6b580001}, {0xc78, 0x6a590001},
+	{0xc78, 0x695a0001}, {0xc78, 0x685b0001},
+	{0xc78, 0x675c0001}, {0xc78, 0x665d0001},
+	{0xc78, 0x655e0001}, {0xc78, 0x645f0001},
+	{0xc78, 0x63600001}, {0xc78, 0x62610001},
+	{0xc78, 0x61620001}, {0xc78, 0x60630001},
+	{0xc78, 0x46640001}, {0xc78, 0x45650001},
+	{0xc78, 0x44660001}, {0xc78, 0x43670001},
+	{0xc78, 0x42680001}, {0xc78, 0x41690001},
+	{0xc78, 0x406a0001}, {0xc78, 0x266b0001},
+	{0xc78, 0x256c0001}, {0xc78, 0x246d0001},
+	{0xc78, 0x236e0001}, {0xc78, 0x226f0001},
+	{0xc78, 0x21700001}, {0xc78, 0x20710001},
+	{0xc78, 0x06720001}, {0xc78, 0x05730001},
+	{0xc78, 0x04740001}, {0xc78, 0x03750001},
+	{0xc78, 0x02760001}, {0xc78, 0x01770001},
+	{0xc78, 0x00780001}, {0xc78, 0x00790001},
+	{0xc78, 0x007a0001}, {0xc78, 0x007b0001},
+	{0xc78, 0x007c0001}, {0xc78, 0x007d0001},
+	{0xc78, 0x007e0001}, {0xc78, 0x007f0001},
+	{0xc78, 0x3800001e}, {0xc78, 0x3801001e},
+	{0xc78, 0x3802001e}, {0xc78, 0x3803001e},
+	{0xc78, 0x3804001e}, {0xc78, 0x3805001e},
+	{0xc78, 0x3806001e}, {0xc78, 0x3807001e},
+	{0xc78, 0x3808001e}, {0xc78, 0x3c09001e},
+	{0xc78, 0x3e0a001e}, {0xc78, 0x400b001e},
+	{0xc78, 0x440c001e}, {0xc78, 0x480d001e},
+	{0xc78, 0x4c0e001e}, {0xc78, 0x500f001e},
+	{0xc78, 0x5210001e}, {0xc78, 0x5611001e},
+	{0xc78, 0x5a12001e}, {0xc78, 0x5e13001e},
+	{0xc78, 0x6014001e}, {0xc78, 0x6015001e},
+	{0xc78, 0x6016001e}, {0xc78, 0x6217001e},
+	{0xc78, 0x6218001e}, {0xc78, 0x6219001e},
+	{0xc78, 0x621a001e}, {0xc78, 0x621b001e},
+	{0xc78, 0x621c001e}, {0xc78, 0x621d001e},
+	{0xc78, 0x621e001e}, {0xc78, 0x621f001e},
+	{0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
+	{0x00, 0x00030159}, {0x01, 0x00031284},
+	{0x02, 0x00098000}, {0x03, 0x00039c63},
+	{0x04, 0x000210e7}, {0x09, 0x0002044f},
+	{0x0a, 0x0001a3f1}, {0x0b, 0x00014787},
+	{0x0c, 0x000896fe}, {0x0d, 0x0000e02c},
+	{0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+	{0x19, 0x00000000}, {0x1a, 0x00030355},
+	{0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+	{0x1d, 0x000a1250}, {0x1e, 0x0000024f},
+	{0x1f, 0x00000000}, {0x20, 0x0000b614},
+	{0x21, 0x0006c000}, {0x22, 0x00000000},
+	{0x23, 0x00001558}, {0x24, 0x00000060},
+	{0x25, 0x00000483}, {0x26, 0x0004f000},
+	{0x27, 0x000ec7d9}, {0x28, 0x00057730},
+	{0x29, 0x00004783}, {0x2a, 0x00000001},
+	{0x2b, 0x00021334}, {0x2a, 0x00000000},
+	{0x2b, 0x00000054}, {0x2a, 0x00000001},
+	{0x2b, 0x00000808}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000c}, {0x2a, 0x00000002},
+	{0x2b, 0x00000808}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000003},
+	{0x2b, 0x00000808}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000004},
+	{0x2b, 0x00000808}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000005},
+	{0x2b, 0x00000808}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000006},
+	{0x2b, 0x00000709}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000007},
+	{0x2b, 0x00000709}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000008},
+	{0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000009},
+	{0x2b, 0x0000060a}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+	{0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+	{0x2b, 0x0000060a}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+	{0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+	{0x2b, 0x0000060a}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+	{0x2b, 0x0000050b}, {0x2b, 0x00066666},
+	{0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+	{0x10, 0x0004000f}, {0x11, 0x000e31fc},
+	{0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+	{0x10, 0x0002000f}, {0x11, 0x000203f9},
+	{0x10, 0x0003000f}, {0x11, 0x000ff500},
+	{0x10, 0x00000000}, {0x11, 0x00000000},
+	{0x10, 0x0008000f}, {0x11, 0x0003f100},
+	{0x10, 0x0009000f}, {0x11, 0x00023100},
+	{0x12, 0x00032000}, {0x12, 0x00071000},
+	{0x12, 0x000b0000}, {0x12, 0x000fc000},
+	{0x13, 0x000287b3}, {0x13, 0x000244b7},
+	{0x13, 0x000204ab}, {0x13, 0x0001c49f},
+	{0x13, 0x00018493}, {0x13, 0x0001429b},
+	{0x13, 0x00010299}, {0x13, 0x0000c29c},
+	{0x13, 0x000081a0}, {0x13, 0x000040ac},
+	{0x13, 0x00000020}, {0x14, 0x0001944c},
+	{0x14, 0x00059444}, {0x14, 0x0009944c},
+	{0x14, 0x000d9444}, {0x15, 0x0000f474},
+	{0x15, 0x0004f477}, {0x15, 0x0008f455},
+	{0x15, 0x000cf455}, {0x16, 0x00000339},
+	{0x16, 0x00040339}, {0x16, 0x00080339},
+	{0x16, 0x000c0366}, {0x00, 0x00010159},
+	{0x18, 0x0000f401}, {0xfe, 0x00000000},
+	{0xfe, 0x00000000}, {0x1f, 0x00000003},
+	{0xfe, 0x00000000}, {0xfe, 0x00000000},
+	{0x1e, 0x00000247}, {0x1f, 0x00000000},
+	{0x00, 0x00030159},
+	{0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
+	{0x00, 0x00030159}, {0x01, 0x00031284},
+	{0x02, 0x00098000}, {0x03, 0x00018c63},
+	{0x04, 0x000210e7}, {0x09, 0x0002044f},
+	{0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+	{0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+	{0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+	{0x19, 0x00000000}, {0x1a, 0x00010255},
+	{0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+	{0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+	{0x1f, 0x00080001}, {0x20, 0x0000b614},
+	{0x21, 0x0006c000}, {0x22, 0x00000000},
+	{0x23, 0x00001558}, {0x24, 0x00000060},
+	{0x25, 0x00000483}, {0x26, 0x0004f000},
+	{0x27, 0x000ec7d9}, {0x28, 0x000577c0},
+	{0x29, 0x00004783}, {0x2a, 0x00000001},
+	{0x2b, 0x00021334}, {0x2a, 0x00000000},
+	{0x2b, 0x00000054}, {0x2a, 0x00000001},
+	{0x2b, 0x00000808}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000c}, {0x2a, 0x00000002},
+	{0x2b, 0x00000808}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000003},
+	{0x2b, 0x00000808}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000004},
+	{0x2b, 0x00000808}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000005},
+	{0x2b, 0x00000808}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000006},
+	{0x2b, 0x00000709}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000007},
+	{0x2b, 0x00000709}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000008},
+	{0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000009},
+	{0x2b, 0x0000060a}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+	{0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+	{0x2b, 0x0000060a}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+	{0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+	{0x2b, 0x0000060a}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+	{0x2b, 0x0000050b}, {0x2b, 0x00066666},
+	{0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+	{0x10, 0x0004000f}, {0x11, 0x000e31fc},
+	{0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+	{0x10, 0x0002000f}, {0x11, 0x000203f9},
+	{0x10, 0x0003000f}, {0x11, 0x000ff500},
+	{0x10, 0x00000000}, {0x11, 0x00000000},
+	{0x10, 0x0008000f}, {0x11, 0x0003f100},
+	{0x10, 0x0009000f}, {0x11, 0x00023100},
+	{0x12, 0x00032000}, {0x12, 0x00071000},
+	{0x12, 0x000b0000}, {0x12, 0x000fc000},
+	{0x13, 0x000287b3}, {0x13, 0x000244b7},
+	{0x13, 0x000204ab}, {0x13, 0x0001c49f},
+	{0x13, 0x00018493}, {0x13, 0x0001429b},
+	{0x13, 0x00010299}, {0x13, 0x0000c29c},
+	{0x13, 0x000081a0}, {0x13, 0x000040ac},
+	{0x13, 0x00000020}, {0x14, 0x0001944c},
+	{0x14, 0x00059444}, {0x14, 0x0009944c},
+	{0x14, 0x000d9444}, {0x15, 0x0000f424},
+	{0x15, 0x0004f424}, {0x15, 0x0008f424},
+	{0x15, 0x000cf424}, {0x16, 0x000e0330},
+	{0x16, 0x000a0330}, {0x16, 0x00060330},
+	{0x16, 0x00020330}, {0x00, 0x00010159},
+	{0x18, 0x0000f401}, {0xfe, 0x00000000},
+	{0xfe, 0x00000000}, {0x1f, 0x00080003},
+	{0xfe, 0x00000000}, {0xfe, 0x00000000},
+	{0x1e, 0x00044457}, {0x1f, 0x00080000},
+	{0x00, 0x00030159},
+	{0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radiob_2t_init_table[] = {
+	{0x00, 0x00030159}, {0x01, 0x00031284},
+	{0x02, 0x00098000}, {0x03, 0x00018c63},
+	{0x04, 0x000210e7}, {0x09, 0x0002044f},
+	{0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+	{0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+	{0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+	{0x12, 0x00032000}, {0x12, 0x00071000},
+	{0x12, 0x000b0000}, {0x12, 0x000fc000},
+	{0x13, 0x000287af}, {0x13, 0x000244b7},
+	{0x13, 0x000204ab}, {0x13, 0x0001c49f},
+	{0x13, 0x00018493}, {0x13, 0x00014297},
+	{0x13, 0x00010295}, {0x13, 0x0000c298},
+	{0x13, 0x0000819c}, {0x13, 0x000040a8},
+	{0x13, 0x0000001c}, {0x14, 0x0001944c},
+	{0x14, 0x00059444}, {0x14, 0x0009944c},
+	{0x14, 0x000d9444}, {0x15, 0x0000f424},
+	{0x15, 0x0004f424}, {0x15, 0x0008f424},
+	{0x15, 0x000cf424}, {0x16, 0x000e0330},
+	{0x16, 0x000a0330}, {0x16, 0x00060330},
+	{0x16, 0x00020330},
+	{0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radioa_1t_init_table[] = {
+	{0x00, 0x00030159}, {0x01, 0x00031284},
+	{0x02, 0x00098000}, {0x03, 0x00018c63},
+	{0x04, 0x000210e7}, {0x09, 0x0002044f},
+	{0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+	{0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+	{0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+	{0x19, 0x00000000}, {0x1a, 0x00010255},
+	{0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+	{0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+	{0x1f, 0x00080001}, {0x20, 0x0000b614},
+	{0x21, 0x0006c000}, {0x22, 0x00000000},
+	{0x23, 0x00001558}, {0x24, 0x00000060},
+	{0x25, 0x00000483}, {0x26, 0x0004f000},
+	{0x27, 0x000ec7d9}, {0x28, 0x000577c0},
+	{0x29, 0x00004783}, {0x2a, 0x00000001},
+	{0x2b, 0x00021334}, {0x2a, 0x00000000},
+	{0x2b, 0x00000054}, {0x2a, 0x00000001},
+	{0x2b, 0x00000808}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000c}, {0x2a, 0x00000002},
+	{0x2b, 0x00000808}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000003},
+	{0x2b, 0x00000808}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000004},
+	{0x2b, 0x00000808}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000005},
+	{0x2b, 0x00000808}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000006},
+	{0x2b, 0x00000709}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000007},
+	{0x2b, 0x00000709}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000008},
+	{0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000009},
+	{0x2b, 0x0000060a}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+	{0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+	{0x2b, 0x0000060a}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+	{0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+	{0x2b, 0x0000060a}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+	{0x2b, 0x0000050b}, {0x2b, 0x00066666},
+	{0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+	{0x10, 0x0004000f}, {0x11, 0x000e31fc},
+	{0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+	{0x10, 0x0002000f}, {0x11, 0x000203f9},
+	{0x10, 0x0003000f}, {0x11, 0x000ff500},
+	{0x10, 0x00000000}, {0x11, 0x00000000},
+	{0x10, 0x0008000f}, {0x11, 0x0003f100},
+	{0x10, 0x0009000f}, {0x11, 0x00023100},
+	{0x12, 0x00032000}, {0x12, 0x00071000},
+	{0x12, 0x000b0000}, {0x12, 0x000fc000},
+	{0x13, 0x000287b3}, {0x13, 0x000244b7},
+	{0x13, 0x000204ab}, {0x13, 0x0001c49f},
+	{0x13, 0x00018493}, {0x13, 0x0001429b},
+	{0x13, 0x00010299}, {0x13, 0x0000c29c},
+	{0x13, 0x000081a0}, {0x13, 0x000040ac},
+	{0x13, 0x00000020}, {0x14, 0x0001944c},
+	{0x14, 0x00059444}, {0x14, 0x0009944c},
+	{0x14, 0x000d9444}, {0x15, 0x0000f405},
+	{0x15, 0x0004f405}, {0x15, 0x0008f405},
+	{0x15, 0x000cf405}, {0x16, 0x000e0330},
+	{0x16, 0x000a0330}, {0x16, 0x00060330},
+	{0x16, 0x00020330}, {0x00, 0x00010159},
+	{0x18, 0x0000f401}, {0xfe, 0x00000000},
+	{0xfe, 0x00000000}, {0x1f, 0x00080003},
+	{0xfe, 0x00000000}, {0xfe, 0x00000000},
+	{0x1e, 0x00044457}, {0x1f, 0x00080000},
+	{0x00, 0x00030159},
+	{0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = {
+	{0x00, 0x00030159}, {0x01, 0x00031284},
+	{0x02, 0x00098000}, {0x03, 0x00018c63},
+	{0x04, 0x000210e7}, {0x09, 0x0002044f},
+	{0x0a, 0x0001adb0}, {0x0b, 0x00054867},
+	{0x0c, 0x0008992e}, {0x0d, 0x0000e529},
+	{0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+	{0x19, 0x00000000}, {0x1a, 0x00000255},
+	{0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+	{0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+	{0x1f, 0x00080001}, {0x20, 0x0000b614},
+	{0x21, 0x0006c000}, {0x22, 0x0000083c},
+	{0x23, 0x00001558}, {0x24, 0x00000060},
+	{0x25, 0x00000483}, {0x26, 0x0004f000},
+	{0x27, 0x000ec7d9}, {0x28, 0x000977c0},
+	{0x29, 0x00004783}, {0x2a, 0x00000001},
+	{0x2b, 0x00021334}, {0x2a, 0x00000000},
+	{0x2b, 0x00000054}, {0x2a, 0x00000001},
+	{0x2b, 0x00000808}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000c}, {0x2a, 0x00000002},
+	{0x2b, 0x00000808}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000003},
+	{0x2b, 0x00000808}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000004},
+	{0x2b, 0x00000808}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000005},
+	{0x2b, 0x00000808}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000006},
+	{0x2b, 0x00000709}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000007},
+	{0x2b, 0x00000709}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000008},
+	{0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000009},
+	{0x2b, 0x0000060a}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+	{0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+	{0x2b, 0x0000060a}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+	{0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+	{0x2b, 0x0000060a}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+	{0x2b, 0x0000050b}, {0x2b, 0x00066666},
+	{0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+	{0x10, 0x0004000f}, {0x11, 0x000e31fc},
+	{0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+	{0x10, 0x0002000f}, {0x11, 0x000203f9},
+	{0x10, 0x0003000f}, {0x11, 0x000ff500},
+	{0x10, 0x00000000}, {0x11, 0x00000000},
+	{0x10, 0x0008000f}, {0x11, 0x0003f100},
+	{0x10, 0x0009000f}, {0x11, 0x00023100},
+	{0x12, 0x000d8000}, {0x12, 0x00090000},
+	{0x12, 0x00051000}, {0x12, 0x00012000},
+	{0x13, 0x00028fb4}, {0x13, 0x00024fa8},
+	{0x13, 0x000207a4}, {0x13, 0x0001c3b0},
+	{0x13, 0x000183a4}, {0x13, 0x00014398},
+	{0x13, 0x000101a4}, {0x13, 0x0000c198},
+	{0x13, 0x000080a4}, {0x13, 0x00004098},
+	{0x13, 0x00000000}, {0x14, 0x0001944c},
+	{0x14, 0x00059444}, {0x14, 0x0009944c},
+	{0x14, 0x000d9444}, {0x15, 0x0000f405},
+	{0x15, 0x0004f405}, {0x15, 0x0008f405},
+	{0x15, 0x000cf405}, {0x16, 0x000e0330},
+	{0x16, 0x000a0330}, {0x16, 0x00060330},
+	{0x16, 0x00020330}, {0x00, 0x00010159},
+	{0x18, 0x0000f401}, {0xfe, 0x00000000},
+	{0xfe, 0x00000000}, {0x1f, 0x00080003},
+	{0xfe, 0x00000000}, {0xfe, 0x00000000},
+	{0x1e, 0x00044457}, {0x1f, 0x00080000},
+	{0x00, 0x00030159},
+	{0xff, 0xffffffff}
+};
+
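+/*
+ * Per-path HSSI/LSSI control and readback registers, indexed by
+ * enum rtl8xxxu_rfpath (RF_A/RF_B). rtl8xxxu_read_rfreg() and
+ * rtl8xxxu_write_rfreg() further down pick their registers from
+ * this table.
+ */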
+static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = {
+	{	/* RF_A */
+		.hssiparm1 = REG_FPGA0_XA_HSSI_PARM1,
+		.hssiparm2 = REG_FPGA0_XA_HSSI_PARM2,
+		.lssiparm = REG_FPGA0_XA_LSSI_PARM,
+		.hspiread = REG_HSPI_XA_READBACK,
+		.lssiread = REG_FPGA0_XA_LSSI_READBACK,
+		.rf_sw_ctrl = REG_FPGA0_XA_RF_SW_CTRL,
+	},
+	{	/* RF_B */
+		.hssiparm1 = REG_FPGA0_XB_HSSI_PARM1,
+		.hssiparm2 = REG_FPGA0_XB_HSSI_PARM2,
+		.lssiparm = REG_FPGA0_XB_LSSI_PARM,
+		.hspiread = REG_HSPI_XB_READBACK,
+		.lssiread = REG_FPGA0_XB_LSSI_READBACK,
+		.rf_sw_ctrl = REG_FPGA0_XB_RF_SW_CTRL,
+	},
+};
+
+static const u32 rtl8723au_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
+	REG_OFDM0_XA_RX_IQ_IMBALANCE,
+	REG_OFDM0_XB_RX_IQ_IMBALANCE,
+	REG_OFDM0_ENERGY_CCA_THRES,
+	REG_OFDM0_AGCR_SSI_TABLE,
+	REG_OFDM0_XA_TX_IQ_IMBALANCE,
+	REG_OFDM0_XB_TX_IQ_IMBALANCE,
+	REG_OFDM0_XC_TX_AFE,
+	REG_OFDM0_XD_TX_AFE,
+	REG_OFDM0_RX_IQ_EXT_ANTA
+};
+
+static u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr)
+{
+	struct usb_device *udev = priv->udev;
+	int len;
+	u8 data;
+
+	mutex_lock(&priv->usb_buf_mutex);
+	len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+			      REALTEK_USB_CMD_REQ, REALTEK_USB_READ,
+			      addr, 0, &priv->usb_buf.val8, sizeof(u8),
+			      RTW_USB_CONTROL_MSG_TIMEOUT);
+	data = priv->usb_buf.val8;
+	mutex_unlock(&priv->usb_buf_mutex);
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ)
+		dev_info(&udev->dev, "%s(%04x)   = 0x%02x, len %i\n",
+			 __func__, addr, data, len);
+	return data;
+}
+
+static u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr)
+{
+	struct usb_device *udev = priv->udev;
+	int len;
+	u16 data;
+
+	mutex_lock(&priv->usb_buf_mutex);
+	len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+			      REALTEK_USB_CMD_REQ, REALTEK_USB_READ,
+			      addr, 0, &priv->usb_buf.val16, sizeof(u16),
+			      RTW_USB_CONTROL_MSG_TIMEOUT);
+	data = le16_to_cpu(priv->usb_buf.val16);
+	mutex_unlock(&priv->usb_buf_mutex);
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ)
+		dev_info(&udev->dev, "%s(%04x)  = 0x%04x, len %i\n",
+			 __func__, addr, data, len);
+	return data;
+}
+
+static u32 rtl8xxxu_read32(struct rtl8xxxu_priv *priv, u16 addr)
+{
+	struct usb_device *udev = priv->udev;
+	int len;
+	u32 data;
+
+	mutex_lock(&priv->usb_buf_mutex);
+	len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+			      REALTEK_USB_CMD_REQ, REALTEK_USB_READ,
+			      addr, 0, &priv->usb_buf.val32, sizeof(u32),
+			      RTW_USB_CONTROL_MSG_TIMEOUT);
+	data = le32_to_cpu(priv->usb_buf.val32);
+	mutex_unlock(&priv->usb_buf_mutex);
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ)
+		dev_info(&udev->dev, "%s(%04x)  = 0x%08x, len %i\n",
+			 __func__, addr, data, len);
+	return data;
+}
+
+static int rtl8xxxu_write8(struct rtl8xxxu_priv *priv, u16 addr, u8 val)
+{
+	struct usb_device *udev = priv->udev;
+	int ret;
+
+	mutex_lock(&priv->usb_buf_mutex);
+	priv->usb_buf.val8 = val;
+	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+			      REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+			      addr, 0, &priv->usb_buf.val8, sizeof(u8),
+			      RTW_USB_CONTROL_MSG_TIMEOUT);
+
+	mutex_unlock(&priv->usb_buf_mutex);
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+		dev_info(&udev->dev, "%s(%04x) = 0x%02x\n",
+			 __func__, addr, val);
+	return ret;
+}
+
+static int rtl8xxxu_write16(struct rtl8xxxu_priv *priv, u16 addr, u16 val)
+{
+	struct usb_device *udev = priv->udev;
+	int ret;
+
+	mutex_lock(&priv->usb_buf_mutex);
+	priv->usb_buf.val16 = cpu_to_le16(val);
+	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+			      REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+			      addr, 0, &priv->usb_buf.val16, sizeof(u16),
+			      RTW_USB_CONTROL_MSG_TIMEOUT);
+	mutex_unlock(&priv->usb_buf_mutex);
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+		dev_info(&udev->dev, "%s(%04x) = 0x%04x\n",
+			 __func__, addr, val);
+	return ret;
+}
+
+static int rtl8xxxu_write32(struct rtl8xxxu_priv *priv, u16 addr, u32 val)
+{
+	struct usb_device *udev = priv->udev;
+	int ret;
+
+	mutex_lock(&priv->usb_buf_mutex);
+	priv->usb_buf.val32 = cpu_to_le32(val);
+	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+			      REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+			      addr, 0, &priv->usb_buf.val32, sizeof(u32),
+			      RTW_USB_CONTROL_MSG_TIMEOUT);
+	mutex_unlock(&priv->usb_buf_mutex);
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+		dev_info(&udev->dev, "%s(%04x) = 0x%08x\n",
+			 __func__, addr, val);
+	return ret;
+}
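+
+/*
+ * All of the 8/16/32-bit register accessors above funnel through a
+ * single USB control transfer on endpoint 0, serialized by
+ * usb_buf_mutex. A typical read-modify-write of a MAC register is
+ * therefore simply (illustrative sketch only, REG_EXAMPLE is a
+ * placeholder and not a define from rtl8xxxu_regs.h):
+ *
+ *	val32 = rtl8xxxu_read32(priv, REG_EXAMPLE);
+ *	val32 &= ~BIT(4);
+ *	val32 |= BIT(3);
+ *	rtl8xxxu_write32(priv, REG_EXAMPLE, val32);
+ */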
+
+static int
+rtl8xxxu_writeN(struct rtl8xxxu_priv *priv, u16 addr, u8 *buf, u16 len)
+{
+	struct usb_device *udev = priv->udev;
+	int blocksize = priv->fops->writeN_block_size;
+	int ret, i, count, remainder;
+
+	count = len / blocksize;
+	remainder = len % blocksize;
+
+	for (i = 0; i < count; i++) {
+		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+				      REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+				      addr, 0, buf, blocksize,
+				      RTW_USB_CONTROL_MSG_TIMEOUT);
+		if (ret != blocksize)
+			goto write_error;
+
+		addr += blocksize;
+		buf += blocksize;
+	}
+
+	if (remainder) {
+		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+				      REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+				      addr, 0, buf, remainder,
+				      RTW_USB_CONTROL_MSG_TIMEOUT);
+		if (ret != remainder)
+			goto write_error;
+	}
+
+	return len;
+
+write_error:
+	dev_info(&udev->dev,
+		 "%s: Failed to write block at addr: %04x size: %04x\n",
+		 __func__, addr, blocksize);
+	return -EAGAIN;
+}
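+
+/*
+ * Example (numbers purely illustrative): with a chip-specific
+ * writeN_block_size of 254 bytes, a 600 byte buffer goes out as two
+ * full 254 byte control transfers followed by a 92 byte remainder,
+ * the target address advancing by the block size for each transfer.
+ */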
+
+static u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv,
+			       enum rtl8xxxu_rfpath path, u8 reg)
+{
+	u32 hssia, val32, retval;
+
+	hssia = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2);
+	if (path != RF_A)
+		val32 = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hssiparm2);
+	else
+		val32 = hssia;
+
+	val32 &= ~FPGA0_HSSI_PARM2_ADDR_MASK;
+	val32 |= (reg << FPGA0_HSSI_PARM2_ADDR_SHIFT);
+	val32 |= FPGA0_HSSI_PARM2_EDGE_READ;
+	hssia &= ~FPGA0_HSSI_PARM2_EDGE_READ;
+	rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM2, hssia);
+
+	udelay(10);
+
+	rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].hssiparm2, val32);
+	udelay(100);
+
+	hssia |= FPGA0_HSSI_PARM2_EDGE_READ;
+	rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM2, hssia);
+	udelay(10);
+
+	val32 = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hssiparm1);
+	if (val32 & FPGA0_HSSI_PARM1_PI)
+		retval = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hspiread);
+	else
+		retval = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].lssiread);
+
+	retval &= 0xfffff;
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_READ)
+		dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n",
+			 __func__, reg, retval);
+	return retval;
+}
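+
+/*
+ * RF registers are accessed indirectly: the sequence above writes the
+ * register address into the path's HSSI parameter register while
+ * toggling FPGA0_HSSI_PARM2_EDGE_READ, then fetches the 20-bit result
+ * from either the HSPI or the LSSI readback register, depending on
+ * whether the path has FPGA0_HSSI_PARM1_PI set.
+ */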
+
+static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
+				enum rtl8xxxu_rfpath path, u8 reg, u32 data)
+{
+	int ret, retval;
+	u32 dataaddr;
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_WRITE)
+		dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n",
+			 __func__, reg, data);
+
+	data &= FPGA0_LSSI_PARM_DATA_MASK;
+	dataaddr = (reg << FPGA0_LSSI_PARM_ADDR_SHIFT) | data;
+
+	/* Use XB for path B */
+	ret = rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].lssiparm, dataaddr);
+	if (ret != sizeof(dataaddr))
+		retval = -EIO;
+	else
+		retval = 0;
+
+	udelay(1);
+
+	return retval;
+}
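+
+/*
+ * RF register values are 20 bits wide (note the 0xfffff mask in
+ * rtl8xxxu_read_rfreg() above), so the write path packs the register
+ * address above the data field in a single LSSI parameter word; the
+ * exact shift and mask come from rtl8xxxu_regs.h.
+ */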
+
+static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c)
+{
+	struct device *dev = &priv->udev->dev;
+	int mbox_nr, retry, retval = 0;
+	int mbox_reg, mbox_ext_reg;
+	u8 val8;
+
+	mutex_lock(&priv->h2c_mutex);
+
+	mbox_nr = priv->next_mbox;
+	mbox_reg = REG_HMBOX_0 + (mbox_nr * 4);
+	mbox_ext_reg = REG_HMBOX_EXT_0 + (mbox_nr * 2);
+
+	/*
+	 * MBOX ready?
+	 */
+	retry = 100;
+	do {
+		val8 = rtl8xxxu_read8(priv, REG_HMTFR);
+		if (!(val8 & BIT(mbox_nr)))
+			break;
+	} while (retry--);
+
+	if (!retry) {
+		dev_dbg(dev, "%s: Mailbox busy\n", __func__);
+		retval = -EBUSY;
+		goto error;
+	}
+
+	/*
+	 * The h2c command is stored in little endian order; convert it
+	 * back to CPU order here, as rtl8xxxu_write16/32() will swap it
+	 * back to little endian on the way out.
+	 */
+	if (h2c->cmd.cmd & H2C_EXT) {
+		rtl8xxxu_write16(priv, mbox_ext_reg,
+				 le16_to_cpu(h2c->raw.ext));
+		if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+			dev_info(dev, "H2C_EXT %04x\n",
+				 le16_to_cpu(h2c->raw.ext));
+	}
+	rtl8xxxu_write32(priv, mbox_reg, le32_to_cpu(h2c->raw.data));
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+		dev_info(dev, "H2C %08x\n", le32_to_cpu(h2c->raw.data));
+
+	priv->next_mbox = (mbox_nr + 1) % H2C_MAX_MBOX;
+
+error:
+	mutex_unlock(&priv->h2c_mutex);
+	return retval;
+}
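+
+/*
+ * The firmware provides H2C_MAX_MBOX mailboxes which are used
+ * round-robin via priv->next_mbox. Each box has a 4-byte main
+ * register (REG_HMBOX_0 + 4 * n) and a 2-byte extension register
+ * (REG_HMBOX_EXT_0 + 2 * n), and bit n of REG_HMTFR stays set while
+ * box n is still busy. A caller fills in a struct h2c_cmd (see
+ * rtl8xxxu.h) and hands it off, roughly (illustrative sketch, the
+ * payload depends on the specific H2C opcode):
+ *
+ *	struct h2c_cmd h2c;
+ *
+ *	memset(&h2c, 0, sizeof(h2c));
+ *	h2c.cmd.cmd = <opcode>;
+ *	rtl8723a_h2c_cmd(priv, &h2c);
+ */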
+
+static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u32 val32;
+
+	val8 = rtl8xxxu_read8(priv, REG_SPS0_CTRL);
+	val8 |= BIT(0) | BIT(3);
+	rtl8xxxu_write8(priv, REG_SPS0_CTRL, val8);
+
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_PARM);
+	val32 &= ~(BIT(4) | BIT(5));
+	val32 |= BIT(3);
+	if (priv->rf_paths == 2) {
+		val32 &= ~(BIT(20) | BIT(21));
+		val32 |= BIT(19);
+	}
+	rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_PARM, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+	val32 &= ~OFDM_RF_PATH_TX_MASK;
+	if (priv->tx_paths == 2)
+		val32 |= OFDM_RF_PATH_TX_A | OFDM_RF_PATH_TX_B;
+	else if (priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c)
+		val32 |= OFDM_RF_PATH_TX_B;
+	else
+		val32 |= OFDM_RF_PATH_TX_A;
+	rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+	val32 &= ~FPGA_RF_MODE_JAPAN;
+	rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+	if (priv->rf_paths == 2)
+		rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x63db25a0);
+	else
+		rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x631b25a0);
+
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x32d95);
+	if (priv->rf_paths == 2)
+		rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0x32d95);
+
+	rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+}
+
+static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv)
+{
+	u8 sps0;
+	u32 val32;
+
+	rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+	sps0 = rtl8xxxu_read8(priv, REG_SPS0_CTRL);
+
+	/* RF RX code for preamble power saving */
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_PARM);
+	val32 &= ~(BIT(3) | BIT(4) | BIT(5));
+	if (priv->rf_paths == 2)
+		val32 &= ~(BIT(19) | BIT(20) | BIT(21));
+	rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_PARM, val32);
+
+	/* Disable TX for four paths */
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+	val32 &= ~OFDM_RF_PATH_TX_MASK;
+	rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+	/* Enable power saving */
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+	val32 |= FPGA_RF_MODE_JAPAN;
+	rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+	/* AFE control register to power down bits [30:22] */
+	if (priv->rf_paths == 2)
+		rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x00db25a0);
+	else
+		rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x001b25a0);
+
+	/* Power down RF module */
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0);
+	if (priv->rf_paths == 2)
+		rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0);
+
+	sps0 &= ~(BIT(0) | BIT(3));
+	rtl8xxxu_write8(priv, REG_SPS0_CTRL, sps0);
+}
+
+
+static void rtl8723a_stop_tx_beacon(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+
+	val8 = rtl8xxxu_read8(priv, REG_FWHW_TXQ_CTRL + 2);
+	val8 &= ~BIT(6);
+	rtl8xxxu_write8(priv, REG_FWHW_TXQ_CTRL + 2, val8);
+
+	rtl8xxxu_write8(priv, REG_TBTT_PROHIBIT + 1, 0x64);
+	val8 = rtl8xxxu_read8(priv, REG_TBTT_PROHIBIT + 2);
+	val8 &= ~BIT(0);
+	rtl8xxxu_write8(priv, REG_TBTT_PROHIBIT + 2, val8);
+}
+
+
+/*
+ * The rtl8723a has 3 channel groups for its efuse settings. It only
+ * supports the 2.4GHz band, so channels 1 - 14:
+ *  group 0: channels 1 - 3
+ *  group 1: channels 4 - 9
+ *  group 2: channels 10 - 14
+ *
+ * Note: We index from 0 in the code
+ */
+static int rtl8723a_channel_to_group(int channel)
+{
+	int group;
+
+	if (channel < 4)
+		group = 0;
+	else if (channel < 10)
+		group = 1;
+	else
+		group = 2;
+
+	return group;
+}
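+
+/*
+ * For example, rtl8723a_channel_to_group(7) returns 1 and
+ * rtl8723a_channel_to_group(13) returns 2.
+ */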
+
+static void rtl8723au_config_channel(struct ieee80211_hw *hw)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	u32 val32, rsr;
+	u8 val8, opmode;
+	bool ht = true;
+	int sec_ch_above, channel;
+	int i;
+
+	opmode = rtl8xxxu_read8(priv, REG_BW_OPMODE);
+	rsr = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+	channel = hw->conf.chandef.chan->hw_value;
+
+	switch (hw->conf.chandef.width) {
+	case NL80211_CHAN_WIDTH_20_NOHT:
+		ht = false;
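+		/* fall through - the 20MHz setup below applies here too */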
+	case NL80211_CHAN_WIDTH_20:
+		opmode |= BW_OPMODE_20MHZ;
+		rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+		val32 &= ~FPGA_RF_MODE;
+		rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+		val32 &= ~FPGA_RF_MODE;
+		rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_ANALOG2);
+		val32 |= FPGA0_ANALOG2_20MHZ;
+		rtl8xxxu_write32(priv, REG_FPGA0_ANALOG2, val32);
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		if (hw->conf.chandef.center_freq1 >
+		    hw->conf.chandef.chan->center_freq) {
+			sec_ch_above = 1;
+			channel += 2;
+		} else {
+			sec_ch_above = 0;
+			channel -= 2;
+		}
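+		/*
+		 * Example: primary channel 6 (2437 MHz) with
+		 * center_freq1 at 2447 MHz puts the secondary channel
+		 * above, so the RF is tuned to channel 8, the center
+		 * of the 40MHz pair.
+		 */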
+
+		opmode &= ~BW_OPMODE_20MHZ;
+		rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
+		rsr &= ~RSR_RSC_BANDWIDTH_40M;
+		if (sec_ch_above)
+			rsr |= RSR_RSC_UPPER_SUB_CHANNEL;
+		else
+			rsr |= RSR_RSC_LOWER_SUB_CHANNEL;
+		rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, rsr);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+		val32 |= FPGA_RF_MODE;
+		rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+		val32 |= FPGA_RF_MODE;
+		rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+
+		/*
+		 * Set Control channel to upper or lower. These settings
+		 * are required only for 40MHz
+		 */
+		val32 = rtl8xxxu_read32(priv, REG_CCK0_SYSTEM);
+		val32 &= ~CCK0_SIDEBAND;
+		if (!sec_ch_above)
+			val32 |= CCK0_SIDEBAND;
+		rtl8xxxu_write32(priv, REG_CCK0_SYSTEM, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_OFDM1_LSTF);
+		val32 &= ~OFDM_LSTF_PRIME_CH_MASK; /* 0xc00 */
+		if (sec_ch_above)
+			val32 |= OFDM_LSTF_PRIME_CH_LOW;
+		else
+			val32 |= OFDM_LSTF_PRIME_CH_HIGH;
+		rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_ANALOG2);
+		val32 &= ~FPGA0_ANALOG2_20MHZ;
+		rtl8xxxu_write32(priv, REG_FPGA0_ANALOG2, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+		val32 &= ~(FPGA0_PS_LOWER_CHANNEL | FPGA0_PS_UPPER_CHANNEL);
+		if (sec_ch_above)
+			val32 |= FPGA0_PS_UPPER_CHANNEL;
+		else
+			val32 |= FPGA0_PS_LOWER_CHANNEL;
+		rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+		break;
+
+	default:
+		break;
+	}
+
+	for (i = RF_A; i < priv->rf_paths; i++) {
+		val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+		val32 &= ~MODE_AG_CHANNEL_MASK;
+		val32 |= channel;
+		rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+	}
+
+	if (ht)
+		val8 = 0x0e;
+	else
+		val8 = 0x0a;
+
+	rtl8xxxu_write8(priv, REG_SIFS_CCK + 1, val8);
+	rtl8xxxu_write8(priv, REG_SIFS_OFDM + 1, val8);
+
+	rtl8xxxu_write16(priv, REG_R2T_SIFS, 0x0808);
+	rtl8xxxu_write16(priv, REG_T2T_SIFS, 0x0a0a);
+
+	for (i = RF_A; i < priv->rf_paths; i++) {
+		val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+		if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40)
+			val32 &= ~MODE_AG_CHANNEL_20MHZ;
+		else
+			val32 |= MODE_AG_CHANNEL_20MHZ;
+		rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+	}
+}
+
+static void
+rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+{
+	u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
+	u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
+	u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
+	u8 val8;
+	int group, i;
+
+	group = rtl8723a_channel_to_group(channel);
+
+	cck[0] = priv->cck_tx_power_index_A[group];
+	cck[1] = priv->cck_tx_power_index_B[group];
+
+	ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
+	ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
+
+	ofdmbase[0] = ofdm[0] +	priv->ofdm_tx_power_index_diff[group].a;
+	ofdmbase[1] = ofdm[1] +	priv->ofdm_tx_power_index_diff[group].b;
+
+	mcsbase[0] = ofdm[0];
+	mcsbase[1] = ofdm[1];
+	if (!ht40) {
+		mcsbase[0] += priv->ht20_tx_power_index_diff[group].a;
+		mcsbase[1] += priv->ht20_tx_power_index_diff[group].b;
+	}
+
+	if (priv->tx_paths > 1) {
+		if (ofdm[0] > priv->ht40_2s_tx_power_index_diff[group].a)
+			ofdm[0] -=  priv->ht40_2s_tx_power_index_diff[group].a;
+		if (ofdm[1] > priv->ht40_2s_tx_power_index_diff[group].b)
+			ofdm[1] -=  priv->ht40_2s_tx_power_index_diff[group].b;
+	}
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_CHANNEL)
+		dev_info(&priv->udev->dev,
+			 "%s: Setting TX power CCK A: %02x, "
+			 "CCK B: %02x, OFDM A: %02x, OFDM B: %02x\n",
+			 __func__, cck[0], cck[1], ofdm[0], ofdm[1]);
+
+	for (i = 0; i < RTL8723A_MAX_RF_PATHS; i++) {
+		if (cck[i] > RF6052_MAX_TX_PWR)
+			cck[i] = RF6052_MAX_TX_PWR;
+		if (ofdm[i] > RF6052_MAX_TX_PWR)
+			ofdm[i] = RF6052_MAX_TX_PWR;
+	}
+
+	val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
+	val32 &= 0xffff00ff;
+	val32 |= (cck[0] << 8);
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+	val32 &= 0xff;
+	val32 |= ((cck[0] << 8) | (cck[0] << 16) | (cck[0] << 24));
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+	val32 &= 0xffffff00;
+	val32 |= cck[1];
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK1_55_MCS32);
+	val32 &= 0xff;
+	val32 |= ((cck[1] << 8) | (cck[1] << 16) | (cck[1] << 24));
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK1_55_MCS32, val32);
+
+	ofdm_a = ofdmbase[0] | ofdmbase[0] << 8 |
+		ofdmbase[0] << 16 | ofdmbase[0] << 24;
+	ofdm_b = ofdmbase[1] | ofdmbase[1] << 8 |
+		ofdmbase[1] << 16 | ofdmbase[1] << 24;
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm_a);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm_b);
+
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm_a);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm_b);
+
+	mcs_a = mcsbase[0] | mcsbase[0] << 8 |
+		mcsbase[0] << 16 | mcsbase[0] << 24;
+	mcs_b = mcsbase[1] | mcsbase[1] << 8 |
+		mcsbase[1] << 16 | mcsbase[1] << 24;
+
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs_a);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs_b);
+
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs_a);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs_b);
+
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs_a);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs_b);
+
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs_a);
+	for (i = 0; i < 3; i++) {
+		if (i != 2)
+			val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
+		else
+			val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
+		rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
+	}
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs_b);
+	for (i = 0; i < 3; i++) {
+		if (i != 2)
+			val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
+		else
+			val8 = (mcsbase[1] > 6) ? (mcsbase[1] - 6) : 0;
+		rtl8xxxu_write8(priv, REG_OFDM0_XD_TX_IQ_IMBALANCE + i, val8);
+	}
+}
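+
+/*
+ * Each TX_AGC register written above packs four single-byte per-rate
+ * power indices, which is why the same base index is replicated into
+ * all four byte lanes; e.g. (illustrative) an OFDM base index of 0x2a
+ * for path A yields ofdm_a = 0x2a2a2a2a.
+ */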
+
+static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv,
+				  enum nl80211_iftype linktype)
+{
+	u16 val8;
+
+	val8 = rtl8xxxu_read16(priv, REG_MSR);
+	val8 &= ~MSR_LINKTYPE_MASK;
+
+	switch (linktype) {
+	case NL80211_IFTYPE_UNSPECIFIED:
+		val8 |= MSR_LINKTYPE_NONE;
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		val8 |= MSR_LINKTYPE_ADHOC;
+		break;
+	case NL80211_IFTYPE_STATION:
+		val8 |= MSR_LINKTYPE_STATION;
+		break;
+	case NL80211_IFTYPE_AP:
+		val8 |= MSR_LINKTYPE_AP;
+		break;
+	default:
+		goto out;
+	}
+
+	rtl8xxxu_write8(priv, REG_MSR, val8);
+out:
+	return;
+}
+
+static void
+rtl8xxxu_set_retry(struct rtl8xxxu_priv *priv, u16 short_retry, u16 long_retry)
+{
+	u16 val16;
+
+	val16 = ((short_retry << RETRY_LIMIT_SHORT_SHIFT) &
+		 RETRY_LIMIT_SHORT_MASK) |
+		((long_retry << RETRY_LIMIT_LONG_SHIFT) &
+		 RETRY_LIMIT_LONG_MASK);
+
+	rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
+}
+
+static void
+rtl8xxxu_set_spec_sifs(struct rtl8xxxu_priv *priv, u16 cck, u16 ofdm)
+{
+	u16 val16;
+
+	val16 = ((cck << SPEC_SIFS_CCK_SHIFT) & SPEC_SIFS_CCK_MASK) |
+		((ofdm << SPEC_SIFS_OFDM_SHIFT) & SPEC_SIFS_OFDM_MASK);
+
+	rtl8xxxu_write16(priv, REG_SPEC_SIFS, val16);
+}
+
+static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
+{
+	struct device *dev = &priv->udev->dev;
+	char *cut;
+
+	switch (priv->chip_cut) {
+	case 0:
+		cut = "A";
+		break;
+	case 1:
+		cut = "B";
+		break;
+	default:
+		cut = "unknown";
+	}
+
+	dev_info(dev,
+		 "RTL%s rev %s (%s) %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n",
+		 priv->chip_name, cut, priv->vendor_umc ? "UMC" : "TSMC",
+		 priv->tx_paths, priv->rx_paths, priv->ep_tx_count,
+		 priv->has_wifi, priv->has_bluetooth, priv->has_gps,
+		 priv->hi_pa);
+
+	dev_info(dev, "RTL%s MAC: %pM\n", priv->chip_name, priv->mac_addr);
+}
+
+static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+{
+	struct device *dev = &priv->udev->dev;
+	u32 val32, bonding;
+	u16 val16;
+
+	val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
+	priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >>
+		SYS_CFG_CHIP_VERSION_SHIFT;
+	if (val32 & SYS_CFG_TRP_VAUX_EN) {
+		dev_info(dev, "Unsupported test chip\n");
+		return -ENOTSUPP;
+	}
+
+	if (val32 & SYS_CFG_BT_FUNC) {
+		sprintf(priv->chip_name, "8723AU");
+		priv->rf_paths = 1;
+		priv->rx_paths = 1;
+		priv->tx_paths = 1;
+		priv->rtlchip = 0x8723a;
+
+		val32 = rtl8xxxu_read32(priv, REG_MULTI_FUNC_CTRL);
+		if (val32 & MULTI_WIFI_FUNC_EN)
+			priv->has_wifi = 1;
+		if (val32 & MULTI_BT_FUNC_EN)
+			priv->has_bluetooth = 1;
+		if (val32 & MULTI_GPS_FUNC_EN)
+			priv->has_gps = 1;
+	} else if (val32 & SYS_CFG_TYPE_ID) {
+		bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
+		bonding &= HPON_FSM_BONDING_MASK;
+		if (bonding == HPON_FSM_BONDING_1T2R) {
+			sprintf(priv->chip_name, "8191CU");
+			priv->rf_paths = 2;
+			priv->rx_paths = 2;
+			priv->tx_paths = 1;
+			priv->rtlchip = 0x8191c;
+		} else {
+			sprintf(priv->chip_name, "8192CU");
+			priv->rf_paths = 2;
+			priv->rx_paths = 2;
+			priv->tx_paths = 2;
+			priv->rtlchip = 0x8192c;
+		}
+		priv->has_wifi = 1;
+	} else {
+		sprintf(priv->chip_name, "8188CU");
+		priv->rf_paths = 1;
+		priv->rx_paths = 1;
+		priv->tx_paths = 1;
+		priv->rtlchip = 0x8188c;
+		priv->has_wifi = 1;
+	}
+
+	if (val32 & SYS_CFG_VENDOR_ID)
+		priv->vendor_umc = 1;
+
+	val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS);
+	priv->rom_rev = (val32 & GPIO_RF_RL_ID) >> 28;
+
+	val16 = rtl8xxxu_read16(priv, REG_NORMAL_SIE_EP_TX);
+	if (val16 & NORMAL_SIE_EP_TX_HIGH_MASK) {
+		priv->ep_tx_high_queue = 1;
+		priv->ep_tx_count++;
+	}
+
+	if (val16 & NORMAL_SIE_EP_TX_NORMAL_MASK) {
+		priv->ep_tx_normal_queue = 1;
+		priv->ep_tx_count++;
+	}
+
+	if (val16 & NORMAL_SIE_EP_TX_LOW_MASK) {
+		priv->ep_tx_low_queue = 1;
+		priv->ep_tx_count++;
+	}
+
+	/*
+	 * Fallback for devices that do not provide REG_NORMAL_SIE_EP_TX
+	 */
+	if (!priv->ep_tx_count) {
+		switch (priv->nr_out_eps) {
+		case 3:
+			priv->ep_tx_low_queue = 1;
+			priv->ep_tx_count++;
+			/* fall through */
+		case 2:
+			priv->ep_tx_normal_queue = 1;
+			priv->ep_tx_count++;
+			/* fall through */
+		case 1:
+			priv->ep_tx_high_queue = 1;
+			priv->ep_tx_count++;
+			break;
+		default:
+			dev_info(dev, "Unsupported USB TX end-points\n");
+			return -ENOTSUPP;
+		}
+	}
+
+	return 0;
+}
+
+static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+	if (priv->efuse_wifi.efuse8723.rtl_id != cpu_to_le16(0x8129))
+		return -EINVAL;
+
+	ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8723.mac_addr);
+
+	memcpy(priv->cck_tx_power_index_A,
+	       priv->efuse_wifi.efuse8723.cck_tx_power_index_A,
+	       sizeof(priv->cck_tx_power_index_A));
+	memcpy(priv->cck_tx_power_index_B,
+	       priv->efuse_wifi.efuse8723.cck_tx_power_index_B,
+	       sizeof(priv->cck_tx_power_index_B));
+
+	memcpy(priv->ht40_1s_tx_power_index_A,
+	       priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_A,
+	       sizeof(priv->ht40_1s_tx_power_index_A));
+	memcpy(priv->ht40_1s_tx_power_index_B,
+	       priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_B,
+	       sizeof(priv->ht40_1s_tx_power_index_B));
+
+	memcpy(priv->ht20_tx_power_index_diff,
+	       priv->efuse_wifi.efuse8723.ht20_tx_power_index_diff,
+	       sizeof(priv->ht20_tx_power_index_diff));
+	memcpy(priv->ofdm_tx_power_index_diff,
+	       priv->efuse_wifi.efuse8723.ofdm_tx_power_index_diff,
+	       sizeof(priv->ofdm_tx_power_index_diff));
+
+	memcpy(priv->ht40_max_power_offset,
+	       priv->efuse_wifi.efuse8723.ht40_max_power_offset,
+	       sizeof(priv->ht40_max_power_offset));
+	memcpy(priv->ht20_max_power_offset,
+	       priv->efuse_wifi.efuse8723.ht20_max_power_offset,
+	       sizeof(priv->ht20_max_power_offset));
+
+	dev_info(&priv->udev->dev, "Vendor: %.7s\n",
+		 priv->efuse_wifi.efuse8723.vendor_name);
+	dev_info(&priv->udev->dev, "Product: %.41s\n",
+		 priv->efuse_wifi.efuse8723.device_name);
+	return 0;
+}
+
+static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+	int i;
+
+	if (priv->efuse_wifi.efuse8192.rtl_id != cpu_to_le16(0x8129))
+		return -EINVAL;
+
+	ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8192.mac_addr);
+
+	memcpy(priv->cck_tx_power_index_A,
+	       priv->efuse_wifi.efuse8192.cck_tx_power_index_A,
+	       sizeof(priv->cck_tx_power_index_A));
+	memcpy(priv->cck_tx_power_index_B,
+	       priv->efuse_wifi.efuse8192.cck_tx_power_index_B,
+	       sizeof(priv->cck_tx_power_index_B));
+
+	memcpy(priv->ht40_1s_tx_power_index_A,
+	       priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_A,
+	       sizeof(priv->ht40_1s_tx_power_index_A));
+	memcpy(priv->ht40_1s_tx_power_index_B,
+	       priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_B,
+	       sizeof(priv->ht40_1s_tx_power_index_B));
+	memcpy(priv->ht40_2s_tx_power_index_diff,
+	       priv->efuse_wifi.efuse8192.ht40_2s_tx_power_index_diff,
+	       sizeof(priv->ht40_2s_tx_power_index_diff));
+
+	memcpy(priv->ht20_tx_power_index_diff,
+	       priv->efuse_wifi.efuse8192.ht20_tx_power_index_diff,
+	       sizeof(priv->ht20_tx_power_index_diff));
+	memcpy(priv->ofdm_tx_power_index_diff,
+	       priv->efuse_wifi.efuse8192.ofdm_tx_power_index_diff,
+	       sizeof(priv->ofdm_tx_power_index_diff));
+
+	memcpy(priv->ht40_max_power_offset,
+	       priv->efuse_wifi.efuse8192.ht40_max_power_offset,
+	       sizeof(priv->ht40_max_power_offset));
+	memcpy(priv->ht20_max_power_offset,
+	       priv->efuse_wifi.efuse8192.ht20_max_power_offset,
+	       sizeof(priv->ht20_max_power_offset));
+
+	dev_info(&priv->udev->dev, "Vendor: %.7s\n",
+		 priv->efuse_wifi.efuse8192.vendor_name);
+	dev_info(&priv->udev->dev, "Product: %.20s\n",
+		 priv->efuse_wifi.efuse8192.device_name);
+
+	if (priv->efuse_wifi.efuse8192.rf_regulatory & 0x20) {
+		sprintf(priv->chip_name, "8188RU");
+		priv->hi_pa = 1;
+	}
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+		unsigned char *raw = priv->efuse_wifi.raw;
+
+		dev_info(&priv->udev->dev,
+			 "%s: dumping efuse (0x%02zx bytes):\n",
+			 __func__, sizeof(struct rtl8192cu_efuse));
+		for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8) {
+			dev_info(&priv->udev->dev, "%02x: "
+				 "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+				 raw[i], raw[i + 1], raw[i + 2],
+				 raw[i + 3], raw[i + 4], raw[i + 5],
+				 raw[i + 6], raw[i + 7]);
+		}
+	}
+	return 0;
+}
+
+static int
+rtl8xxxu_read_efuse8(struct rtl8xxxu_priv *priv, u16 offset, u8 *data)
+{
+	int i;
+	u8 val8;
+	u32 val32;
+
+	/* Write Address */
+	rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 1, offset & 0xff);
+	val8 = rtl8xxxu_read8(priv, REG_EFUSE_CTRL + 2);
+	val8 &= 0xfc;
+	val8 |= (offset >> 8) & 0x03;
+	rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 2, val8);
+
+	val8 = rtl8xxxu_read8(priv, REG_EFUSE_CTRL + 3);
+	rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 3, val8 & 0x7f);
+
+	/* Poll for data read */
+	val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+	for (i = 0; i < RTL8XXXU_MAX_REG_POLL; i++) {
+		val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+		if (val32 & BIT(31))
+			break;
+	}
+
+	if (i == RTL8XXXU_MAX_REG_POLL)
+		return -EIO;
+
+	udelay(50);
+	val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+
+	*data = val32 & 0xff;
+	return 0;
+}
+
+static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
+{
+	struct device *dev = &priv->udev->dev;
+	int i, ret = 0;
+	u8 val8, word_mask, header, extheader;
+	u16 val16, efuse_addr, offset;
+	u32 val32;
+
+	val16 = rtl8xxxu_read16(priv, REG_9346CR);
+	if (val16 & EEPROM_ENABLE)
+		priv->has_eeprom = 1;
+	if (val16 & EEPROM_BOOT)
+		priv->boot_eeprom = 1;
+
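+	/* Select the WiFi section of the efuse before reading it out */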
+	val32 = rtl8xxxu_read32(priv, REG_EFUSE_TEST);
+	val32 = (val32 & ~EFUSE_SELECT_MASK) | EFUSE_WIFI_SELECT;
+	rtl8xxxu_write32(priv, REG_EFUSE_TEST, val32);
+
+	dev_dbg(dev, "Booting from %s\n",
+		priv->boot_eeprom ? "EEPROM" : "EFUSE");
+
+	rtl8xxxu_write8(priv, REG_EFUSE_ACCESS, EFUSE_ACCESS_ENABLE);
+
+	/*  1.2V Power: From VDDON with Power Cut(0x0000[15]), default valid */
+	val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL);
+	if (!(val16 & SYS_ISO_PWC_EV12V)) {
+		val16 |= SYS_ISO_PWC_EV12V;
+		rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16);
+	}
+	/*  Reset: 0x0000[28], default valid */
+	val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+	if (!(val16 & SYS_FUNC_ELDR)) {
+		val16 |= SYS_FUNC_ELDR;
+		rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+	}
+
+	/*
+	 * Clock: Gated(0x0008[5]) 8M(0x0008[1]) clock from ANA, default valid
+	 */
+	val16 = rtl8xxxu_read16(priv, REG_SYS_CLKR);
+	if (!(val16 & SYS_CLK_LOADER_ENABLE) || !(val16 & SYS_CLK_ANA8M)) {
+		val16 |= (SYS_CLK_LOADER_ENABLE | SYS_CLK_ANA8M);
+		rtl8xxxu_write16(priv, REG_SYS_CLKR, val16);
+	}
+
+	/* Default value is 0xff */
+	memset(priv->efuse_wifi.raw, 0xff, EFUSE_MAP_LEN_8723A);
+
+	efuse_addr = 0;
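+	/*
+	 * Walk the physical efuse. Each PG packet starts with a one byte
+	 * header: a low nibble of 0x0f marks an extended two byte header,
+	 * otherwise the high nibble is the section offset and the low
+	 * nibble is the word-disable mask (a clear bit means the
+	 * corresponding two byte word is present).
+	 */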
+	while (efuse_addr < EFUSE_REAL_CONTENT_LEN_8723A) {
+		ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &header);
+		if (ret || header == 0xff)
+			goto exit;
+
+		if ((header & 0x1f) == 0x0f) {	/* extended header */
+			offset = (header & 0xe0) >> 5;
+
+			ret = rtl8xxxu_read_efuse8(priv, efuse_addr++,
+						   &extheader);
+			if (ret)
+				goto exit;
+			/* All words disabled */
+			if ((extheader & 0x0f) == 0x0f)
+				continue;
+
+			offset |= ((extheader & 0xf0) >> 1);
+			word_mask = extheader & 0x0f;
+		} else {
+			offset = (header >> 4) & 0x0f;
+			word_mask = header & 0x0f;
+		}
+
+		if (offset < EFUSE_MAX_SECTION_8723A) {
+			u16 map_addr;
+			/* Get word enable value from PG header */
+
+			/* We have 8 bits to indicate validity */
+			map_addr = offset * 8;
+			if (map_addr >= EFUSE_MAP_LEN_8723A) {
+				dev_warn(dev, "%s: Illegal map_addr (%04x), "
+					 "efuse corrupt!\n",
+					 __func__, map_addr);
+				ret = -EINVAL;
+				goto exit;
+			}
+			for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+				/* Check word enable condition in the section */
+				if (!(word_mask & BIT(i))) {
+					ret = rtl8xxxu_read_efuse8(priv,
+								   efuse_addr++,
+								   &val8);
+					if (ret)
+						goto exit;
+					priv->efuse_wifi.raw[map_addr++] = val8;
+
+					ret = rtl8xxxu_read_efuse8(priv,
+								   efuse_addr++,
+								   &val8);
+					if (ret)
+						goto exit;
+					priv->efuse_wifi.raw[map_addr++] = val8;
+				} else
+					map_addr += 2;
+			}
+		} else {
+			dev_warn(dev,
+				 "%s: Illegal offset (%04x), efuse corrupt!\n",
+				 __func__, offset);
+			ret = -EINVAL;
+			goto exit;
+		}
+	}
+
+exit:
+	rtl8xxxu_write8(priv, REG_EFUSE_ACCESS, EFUSE_ACCESS_DISABLE);
+
+	return ret;
+}
+
+static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv)
+{
+	struct device *dev = &priv->udev->dev;
+	int ret = 0, i;
+	u32 val32;
+
+	/* Poll checksum report */
+	for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) {
+		val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+		if (val32 & MCU_FW_DL_CSUM_REPORT)
+			break;
+	}
+
+	if (i == RTL8XXXU_FIRMWARE_POLL_MAX) {
+		dev_warn(dev, "Firmware checksum poll timed out\n");
+		ret = -EAGAIN;
+		goto exit;
+	}
+
+	val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+	val32 |= MCU_FW_DL_READY;
+	val32 &= ~MCU_WINT_INIT_READY;
+	rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32);
+
+	/* Wait for firmware to become ready */
+	for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) {
+		val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+		if (val32 & MCU_WINT_INIT_READY)
+			break;
+
+		udelay(100);
+	}
+
+	if (i == RTL8XXXU_FIRMWARE_POLL_MAX) {
+		dev_warn(dev, "Firmware failed to start\n");
+		ret = -EAGAIN;
+		goto exit;
+	}
+
+exit:
+	return ret;
+}
+
+static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv)
+{
+	int pages, remainder, i, ret;
+	u8 val8;
+	u16 val16;
+	u32 val32;
+	u8 *fwptr;
+
+	val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC + 1);
+	val8 |= 4;
+	rtl8xxxu_write8(priv, REG_SYS_FUNC + 1, val8);
+
+	/* 8051 enable */
+	val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+	rtl8xxxu_write16(priv, REG_SYS_FUNC, val16 | SYS_FUNC_CPU_ENABLE);
+
+	/* MCU firmware download enable */
+	val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL);
+	rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_ENABLE);
+
+	/* 8051 reset */
+	val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+	rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32 & ~BIT(19));
+
+	/* Reset firmware download checksum */
+	val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL);
+	rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_CSUM_REPORT);
+
+	pages = priv->fw_size / RTL_FW_PAGE_SIZE;
+	remainder = priv->fw_size % RTL_FW_PAGE_SIZE;
+
+	fwptr = priv->fw_data->data;
+
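+	/*
+	 * The firmware is transferred in RTL_FW_PAGE_SIZE chunks, with the
+	 * destination page selected via the low bits of REG_MCU_FW_DL + 2.
+	 */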
+	for (i = 0; i < pages; i++) {
+		val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8;
+		rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i);
+
+		ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS,
+				      fwptr, RTL_FW_PAGE_SIZE);
+		if (ret != RTL_FW_PAGE_SIZE) {
+			ret = -EAGAIN;
+			goto fw_abort;
+		}
+
+		fwptr += RTL_FW_PAGE_SIZE;
+	}
+
+	if (remainder) {
+		val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8;
+		rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i);
+		ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS,
+				      fwptr, remainder);
+		if (ret != remainder) {
+			ret = -EAGAIN;
+			goto fw_abort;
+		}
+	}
+
+	ret = 0;
+fw_abort:
+	/* MCU firmware download disable */
+	val16 = rtl8xxxu_read16(priv, REG_MCU_FW_DL);
+	rtl8xxxu_write16(priv, REG_MCU_FW_DL,
+			 val16 & (~MCU_FW_DL_ENABLE & 0xff));
+
+	return ret;
+}
+
+static int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
+{
+	struct device *dev = &priv->udev->dev;
+	const struct firmware *fw;
+	int ret = 0;
+	u16 signature;
+
+	dev_info(dev, "%s: Loading firmware %s\n", DRIVER_NAME, fw_name);
+	if (request_firmware(&fw, fw_name, &priv->udev->dev)) {
+		dev_warn(dev, "request_firmware(%s) failed\n", fw_name);
+		ret = -EAGAIN;
+		goto exit;
+	}
+	if (!fw) {
+		dev_warn(dev, "Firmware data not available\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	priv->fw_data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+	if (!priv->fw_data) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+	priv->fw_size = fw->size - sizeof(struct rtl8xxxu_firmware_header);
+
+	signature = le16_to_cpu(priv->fw_data->signature);
+	switch (signature & 0xfff0) {
+	case 0x92c0:
+	case 0x88c0:
+	case 0x2300:
+		break;
+	default:
+		ret = -EINVAL;
+		dev_warn(dev, "%s: Invalid firmware signature: 0x%04x\n",
+			 __func__, signature);
+	}
+
+	dev_info(dev, "Firmware revision %i.%i (signature 0x%04x)\n",
+		 le16_to_cpu(priv->fw_data->major_version),
+		 priv->fw_data->minor_version, signature);
+
+exit:
+	release_firmware(fw);
+	return ret;
+}
+
+static int rtl8723au_load_firmware(struct rtl8xxxu_priv *priv)
+{
+	char *fw_name;
+	int ret;
+
+	switch (priv->chip_cut) {
+	case 0:
+		fw_name = "rtlwifi/rtl8723aufw_A.bin";
+		break;
+	case 1:
+		if (priv->enable_bluetooth)
+			fw_name = "rtlwifi/rtl8723aufw_B.bin";
+		else
+			fw_name = "rtlwifi/rtl8723aufw_B_NoBT.bin";
+
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = rtl8xxxu_load_firmware(priv, fw_name);
+	return ret;
+}
+
+static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+	char *fw_name;
+	int ret;
+
+	if (!priv->vendor_umc)
+		fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
+	else if (priv->chip_cut || priv->rtlchip == 0x8192c)
+		fw_name = "rtlwifi/rtl8192cufw_B.bin";
+	else
+		fw_name = "rtlwifi/rtl8192cufw_A.bin";
+
+	ret = rtl8xxxu_load_firmware(priv, fw_name);
+
+	return ret;
+}
+
+static void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv)
+{
+	u16 val16;
+	int i;
+
+	/* Inform 8051 to perform reset */
+	rtl8xxxu_write8(priv, REG_HMTFR + 3, 0x20);
+
+	for (i = 100; i > 0; i--) {
+		val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+
+		if (!(val16 & SYS_FUNC_CPU_ENABLE)) {
+			dev_dbg(&priv->udev->dev,
+				"%s: Firmware self reset success!\n", __func__);
+			break;
+		}
+		udelay(50);
+	}
+
+	if (!i) {
+		/* Force firmware reset */
+		val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+		val16 &= ~SYS_FUNC_CPU_ENABLE;
+		rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+	}
+}
+
+static int
+rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
+{
+	int i, ret;
+	u16 reg;
+	u8 val;
+
+	for (i = 0; ; i++) {
+		reg = array[i].reg;
+		val = array[i].val;
+
+		if (reg == 0xffff && val == 0xff)
+			break;
+
+		ret = rtl8xxxu_write8(priv, reg, val);
+		if (ret != 1) {
+			dev_warn(&priv->udev->dev,
+				 "Failed to initialize MAC\n");
+			return -EAGAIN;
+		}
+	}
+
+	rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
+
+	return 0;
+}
+
+static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
+				  struct rtl8xxxu_reg32val *array)
+{
+	int i, ret;
+	u16 reg;
+	u32 val;
+
+	for (i = 0; ; i++) {
+		reg = array[i].reg;
+		val = array[i].val;
+
+		if (reg == 0xffff && val == 0xffffffff)
+			break;
+
+		ret = rtl8xxxu_write32(priv, reg, val);
+		if (ret != sizeof(val)) {
+			dev_warn(&priv->udev->dev,
+				 "Failed to initialize PHY\n");
+			return -EAGAIN;
+		}
+		udelay(1);
+	}
+
+	return 0;
+}
+
+/*
+ * Most of this is black magic retrieved from the old rtl8723au driver
+ */
+static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+	u8 val8, ldoa15, ldov12d, lpldo, ldohci12;
+	u32 val32;
+
+	/*
+	 * Todo: The vendor driver maintains a table of PHY register
+	 *       addresses, which is initialized here. Do we need this?
+	 */
+
+	val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+	udelay(2);
+	val8 |= AFE_PLL_320_ENABLE;
+	rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+	udelay(2);
+
+	rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
+	udelay(2);
+
+	val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+	val8 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
+	rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+	/* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */
+	val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+	val32 &= ~AFE_XTAL_RF_GATE;
+	if (priv->has_bluetooth)
+		val32 &= ~AFE_XTAL_BT_GATE;
+	rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
+
+	/* 6. 0x1f[7:0] = 0x07 */
+	val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+	rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+	if (priv->hi_pa)
+		rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table);
+	else if (priv->tx_paths == 2)
+		rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table);
+	else
+		rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table);
+
+	if (priv->rtlchip == 0x8188c && priv->hi_pa &&
+	    priv->vendor_umc && priv->chip_cut == 1)
+		rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50);
+
+	if (priv->tx_paths == 1 && priv->rx_paths == 2) {
+		/*
+		 * For 1T2R boards, patch the registers.
+		 *
+		 * It looks like 8191/2 1T2R boards use path B for TX
+		 */
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_TX_INFO);
+		val32 &= ~(BIT(0) | BIT(1));
+		val32 |= BIT(1);
+		rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA1_TX_INFO);
+		val32 &= ~0x300033;
+		val32 |= 0x200022;
+		rtl8xxxu_write32(priv, REG_FPGA1_TX_INFO, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+		val32 &= 0xff000000;
+		val32 |= 0x45000000;
+		rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+		val32 &= ~(OFDM_RF_PATH_RX_MASK | OFDM_RF_PATH_TX_MASK);
+		val32 |= (OFDM_RF_PATH_RX_A | OFDM_RF_PATH_RX_B |
+			  OFDM_RF_PATH_TX_B);
+		rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_OFDM0_AGC_PARM1);
+		val32 &= ~(BIT(4) | BIT(5));
+		val32 |= BIT(4);
+		rtl8xxxu_write32(priv, REG_OFDM0_AGC_PARM1, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_TX_CCK_RFON);
+		val32 &= ~(BIT(27) | BIT(26));
+		val32 |= BIT(27);
+		rtl8xxxu_write32(priv, REG_TX_CCK_RFON, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_TX_CCK_BBON);
+		val32 &= ~(BIT(27) | BIT(26));
+		val32 |= BIT(27);
+		rtl8xxxu_write32(priv, REG_TX_CCK_BBON, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_TX_OFDM_RFON);
+		val32 &= ~(BIT(27) | BIT(26));
+		val32 |= BIT(27);
+		rtl8xxxu_write32(priv, REG_TX_OFDM_RFON, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_TX_OFDM_BBON);
+		val32 &= ~(BIT(27) | BIT(26));
+		val32 |= BIT(27);
+		rtl8xxxu_write32(priv, REG_TX_OFDM_BBON, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_TX_TO_TX);
+		val32 &= ~(BIT(27) | BIT(26));
+		val32 |= BIT(27);
+		rtl8xxxu_write32(priv, REG_TX_TO_TX, val32);
+	}
+
+	if (priv->hi_pa)
+		rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
+	else
+		rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table);
+
+	if (priv->rtlchip == 0x8723a &&
+	    priv->efuse_wifi.efuse8723.version >= 0x01) {
+		val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL);
+
+		val8 = priv->efuse_wifi.efuse8723.xtal_k & 0x3f;
+		val32 &= 0xff000fff;
+		val32 |= ((val8 | (val8 << 6)) << 12);
+
+		rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32);
+	}
+
+	ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
+	ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
+	ldohci12 = 0x57;
+	lpldo = 1;
+	val32 = (lpldo << 24) | (ldohci12 << 16) | (ldov12d << 8) | ldoa15;
+
+	rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
+
+	return 0;
+}
+
+static int rtl8xxxu_init_rf_regs(struct rtl8xxxu_priv *priv,
+				 struct rtl8xxxu_rfregval *array,
+				 enum rtl8xxxu_rfpath path)
+{
+	int i, ret;
+	u8 reg;
+	u32 val;
+
+	for (i = 0; ; i++) {
+		reg = array[i].reg;
+		val = array[i].val;
+
+		if (reg == 0xff && val == 0xffffffff)
+			break;
+
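+		/*
+		 * Pseudo registers 0xf9 - 0xfe in the init tables are
+		 * delay markers rather than real RF registers.
+		 */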
+		switch (reg) {
+		case 0xfe:
+			msleep(50);
+			continue;
+		case 0xfd:
+			mdelay(5);
+			continue;
+		case 0xfc:
+			mdelay(1);
+			continue;
+		case 0xfb:
+			udelay(50);
+			continue;
+		case 0xfa:
+			udelay(5);
+			continue;
+		case 0xf9:
+			udelay(1);
+			continue;
+		}
+
+		reg &= 0x3f;
+
+		ret = rtl8xxxu_write_rfreg(priv, path, reg, val);
+		if (ret) {
+			dev_warn(&priv->udev->dev,
+				 "Failed to initialize RF\n");
+			return -EAGAIN;
+		}
+		udelay(1);
+	}
+
+	return 0;
+}
+
+static int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
+				struct rtl8xxxu_rfregval *table,
+				enum rtl8xxxu_rfpath path)
+{
+	u32 val32;
+	u16 val16, rfsi_rfenv;
+	u16 reg_sw_ctrl, reg_int_oe, reg_hssi_parm2;
+
+	switch (path) {
+	case RF_A:
+		reg_sw_ctrl = REG_FPGA0_XA_RF_SW_CTRL;
+		reg_int_oe = REG_FPGA0_XA_RF_INT_OE;
+		reg_hssi_parm2 = REG_FPGA0_XA_HSSI_PARM2;
+		break;
+	case RF_B:
+		reg_sw_ctrl = REG_FPGA0_XB_RF_SW_CTRL;
+		reg_int_oe = REG_FPGA0_XB_RF_INT_OE;
+		reg_hssi_parm2 = REG_FPGA0_XB_HSSI_PARM2;
+		break;
+	default:
+		dev_err(&priv->udev->dev, "%s: Unsupported RF path %c\n",
+			__func__, path + 'A');
+		return -EINVAL;
+	}
+	/* For path B, use XB */
+	rfsi_rfenv = rtl8xxxu_read16(priv, reg_sw_ctrl);
+	rfsi_rfenv &= FPGA0_RF_RFENV;
+
+	/*
+	 * These two we might be able to optimize into one
+	 */
+	val32 = rtl8xxxu_read32(priv, reg_int_oe);
+	val32 |= BIT(20);	/* 0x10 << 16 */
+	rtl8xxxu_write32(priv, reg_int_oe, val32);
+	udelay(1);
+
+	val32 = rtl8xxxu_read32(priv, reg_int_oe);
+	val32 |= BIT(4);
+	rtl8xxxu_write32(priv, reg_int_oe, val32);
+	udelay(1);
+
+	/*
+	 * These two we might be able to optimize into one
+	 */
+	val32 = rtl8xxxu_read32(priv, reg_hssi_parm2);
+	val32 &= ~FPGA0_HSSI_3WIRE_ADDR_LEN;
+	rtl8xxxu_write32(priv, reg_hssi_parm2, val32);
+	udelay(1);
+
+	val32 = rtl8xxxu_read32(priv, reg_hssi_parm2);
+	val32 &= ~FPGA0_HSSI_3WIRE_DATA_LEN;
+	rtl8xxxu_write32(priv, reg_hssi_parm2, val32);
+	udelay(1);
+
+	rtl8xxxu_init_rf_regs(priv, table, path);
+
+	/* For path B, use XB */
+	val16 = rtl8xxxu_read16(priv, reg_sw_ctrl);
+	val16 &= ~FPGA0_RF_RFENV;
+	val16 |= rfsi_rfenv;
+	rtl8xxxu_write16(priv, reg_sw_ctrl, val16);
+
+	return 0;
+}
+
+static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data)
+{
+	int ret = -EBUSY;
+	int count = 0;
+	u32 value;
+
+	value = LLT_OP_WRITE | address << 8 | data;
+
+	rtl8xxxu_write32(priv, REG_LLT_INIT, value);
+
+	do {
+		value = rtl8xxxu_read32(priv, REG_LLT_INIT);
+		if ((value & LLT_OP_MASK) == LLT_OP_INACTIVE) {
+			ret = 0;
+			break;
+		}
+	} while (count++ < 20);
+
+	return ret;
+}
+
+static int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+{
+	int ret;
+	int i;
+
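+	/*
+	 * Each LLT entry holds the number of the next TX buffer page.
+	 * Pages 0 .. last_tx_page are chained for normal TX and terminated
+	 * with 0xff; the remaining pages are linked into a ring buffer.
+	 */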
+	for (i = 0; i < last_tx_page; i++) {
+		ret = rtl8xxxu_llt_write(priv, i, i + 1);
+		if (ret)
+			goto exit;
+	}
+
+	ret = rtl8xxxu_llt_write(priv, last_tx_page, 0xff);
+	if (ret)
+		goto exit;
+
+	/* Mark remaining pages as a ring buffer */
+	for (i = last_tx_page + 1; i < 0xff; i++) {
+		ret = rtl8xxxu_llt_write(priv, i, (i + 1));
+		if (ret)
+			goto exit;
+	}
+
+	/*  Let last entry point to the start entry of ring buffer */
+	ret = rtl8xxxu_llt_write(priv, 0xff, last_tx_page + 1);
+	if (ret)
+		goto exit;
+
+exit:
+	return ret;
+}
+
+static int rtl8xxxu_init_queue_priority(struct rtl8xxxu_priv *priv)
+{
+	u16 val16, hi, lo;
+	u16 hiq, mgq, bkq, beq, viq, voq;
+	int hip, mgp, bkp, bep, vip, vop;
+	int ret = 0;
+
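+	/*
+	 * Map the TX queues onto the available bulk out endpoints. With
+	 * fewer than three TX endpoints, several queues have to share
+	 * the same endpoint.
+	 */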
+	switch (priv->ep_tx_count) {
+	case 1:
+		if (priv->ep_tx_high_queue) {
+			hi = TRXDMA_QUEUE_HIGH;
+		} else if (priv->ep_tx_low_queue) {
+			hi = TRXDMA_QUEUE_LOW;
+		} else if (priv->ep_tx_normal_queue) {
+			hi = TRXDMA_QUEUE_NORMAL;
+		} else {
+			hi = 0;
+			ret = -EINVAL;
+		}
+
+		hiq = hi;
+		mgq = hi;
+		bkq = hi;
+		beq = hi;
+		viq = hi;
+		voq = hi;
+
+		hip = 0;
+		mgp = 0;
+		bkp = 0;
+		bep = 0;
+		vip = 0;
+		vop = 0;
+		break;
+	case 2:
+		if (priv->ep_tx_high_queue && priv->ep_tx_low_queue) {
+			hi = TRXDMA_QUEUE_HIGH;
+			lo = TRXDMA_QUEUE_LOW;
+		} else if (priv->ep_tx_normal_queue && priv->ep_tx_low_queue) {
+			hi = TRXDMA_QUEUE_NORMAL;
+			lo = TRXDMA_QUEUE_LOW;
+		} else if (priv->ep_tx_high_queue && priv->ep_tx_normal_queue) {
+			hi = TRXDMA_QUEUE_HIGH;
+			lo = TRXDMA_QUEUE_NORMAL;
+		} else {
+			ret = -EINVAL;
+			hi = 0;
+			lo = 0;
+		}
+
+		hiq = hi;
+		mgq = hi;
+		bkq = lo;
+		beq = lo;
+		viq = hi;
+		voq = hi;
+
+		hip = 0;
+		mgp = 0;
+		bkp = 1;
+		bep = 1;
+		vip = 0;
+		vop = 0;
+		break;
+	case 3:
+		beq = TRXDMA_QUEUE_LOW;
+		bkq = TRXDMA_QUEUE_LOW;
+		viq = TRXDMA_QUEUE_NORMAL;
+		voq = TRXDMA_QUEUE_HIGH;
+		mgq = TRXDMA_QUEUE_HIGH;
+		hiq = TRXDMA_QUEUE_HIGH;
+
+		hip = hiq ^ 3;
+		mgp = mgq ^ 3;
+		bkp = bkq ^ 3;
+		bep = beq ^ 3;
+		vip = viq ^ 3;
+		vop = voq ^ 3;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	/*
+	 * None of the vendor drivers are configuring the beacon
+	 * queue here .... why?
+	 */
+	if (!ret) {
+		val16 = rtl8xxxu_read16(priv, REG_TRXDMA_CTRL);
+		val16 &= 0x7;
+		val16 |= (voq << TRXDMA_CTRL_VOQ_SHIFT) |
+			(viq << TRXDMA_CTRL_VIQ_SHIFT) |
+			(beq << TRXDMA_CTRL_BEQ_SHIFT) |
+			(bkq << TRXDMA_CTRL_BKQ_SHIFT) |
+			(mgq << TRXDMA_CTRL_MGQ_SHIFT) |
+			(hiq << TRXDMA_CTRL_HIQ_SHIFT);
+		rtl8xxxu_write16(priv, REG_TRXDMA_CTRL, val16);
+
+		priv->pipe_out[TXDESC_QUEUE_VO] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[vop]);
+		priv->pipe_out[TXDESC_QUEUE_VI] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[vip]);
+		priv->pipe_out[TXDESC_QUEUE_BE] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[bep]);
+		priv->pipe_out[TXDESC_QUEUE_BK] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[bkp]);
+		priv->pipe_out[TXDESC_QUEUE_BEACON] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[0]);
+		priv->pipe_out[TXDESC_QUEUE_MGNT] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[mgp]);
+		priv->pipe_out[TXDESC_QUEUE_HIGH] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[hip]);
+		priv->pipe_out[TXDESC_QUEUE_CMD] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[0]);
+	}
+
+	return ret;
+}
+
+static void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv,
+				       bool iqk_ok, int result[][8],
+				       int candidate, bool tx_only)
+{
+	u32 oldval, x, tx0_a, reg;
+	int y, tx0_c;
+	u32 val32;
+
+	if (!iqk_ok)
+		return;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
+	oldval = val32 >> 22;
+
+	x = result[candidate][0];
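+	/* Sign extend the 10 bit IQK result */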
+	if ((x & 0x00000200) != 0)
+		x = x | 0xfffffc00;
+	tx0_a = (x * oldval) >> 8;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
+	val32 &= ~0x3ff;
+	val32 |= tx0_a;
+	rtl8xxxu_write32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+	val32 &= ~BIT(31);
+	if ((x * oldval >> 7) & 0x1)
+		val32 |= BIT(31);
+	rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+	y = result[candidate][1];
+	if ((y & 0x00000200) != 0)
+		y = y | 0xfffffc00;
+	tx0_c = (y * oldval) >> 8;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XC_TX_AFE);
+	val32 &= ~0xf0000000;
+	val32 |= (((tx0_c & 0x3c0) >> 6) << 28);
+	rtl8xxxu_write32(priv, REG_OFDM0_XC_TX_AFE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
+	val32 &= ~0x003f0000;
+	val32 |= ((tx0_c & 0x3f) << 16);
+	rtl8xxxu_write32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+	val32 &= ~BIT(29);
+	if ((y * oldval >> 7) & 0x1)
+		val32 |= BIT(29);
+	rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+	if (tx_only) {
+		dev_dbg(&priv->udev->dev, "%s: only TX\n", __func__);
+		return;
+	}
+
+	reg = result[candidate][2];
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE);
+	val32 &= ~0x3ff;
+	val32 |= (reg & 0x3ff);
+	rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE, val32);
+
+	reg = result[candidate][3] & 0x3f;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE);
+	val32 &= ~0xfc00;
+	val32 |= ((reg << 10) & 0xfc00);
+	rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE, val32);
+
+	reg = (result[candidate][3] >> 6) & 0xf;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_RX_IQ_EXT_ANTA);
+	val32 &= ~0xf0000000;
+	val32 |= (reg << 28);
+	rtl8xxxu_write32(priv, REG_OFDM0_RX_IQ_EXT_ANTA, val32);
+}
+
+static void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv,
+				       bool iqk_ok, int result[][8],
+				       int candidate, bool tx_only)
+{
+	u32 oldval, x, tx1_a, reg;
+	int y, tx1_c;
+	u32 val32;
+
+	if (!iqk_ok)
+		return;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE);
+	oldval = val32 >> 22;
+
+	x = result[candidate][4];
+	if ((x & 0x00000200) != 0)
+		x = x | 0xfffffc00;
+	tx1_a = (x * oldval) >> 8;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE);
+	val32 &= ~0x3ff;
+	val32 |= tx1_a;
+	rtl8xxxu_write32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+	val32 &= ~BIT(27);
+	if ((x * oldval >> 7) & 0x1)
+		val32 |= BIT(27);
+	rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+	y = result[candidate][5];
+	if ((y & 0x00000200) != 0)
+		y = y | 0xfffffc00;
+	tx1_c = (y * oldval) >> 8;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XD_TX_AFE);
+	val32 &= ~0xf0000000;
+	val32 |= (((tx1_c & 0x3c0) >> 6) << 28);
+	rtl8xxxu_write32(priv, REG_OFDM0_XD_TX_AFE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE);
+	val32 &= ~0x003f0000;
+	val32 |= ((tx1_c & 0x3f) << 16);
+	rtl8xxxu_write32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+	val32 &= ~BIT(25);
+	if ((y * oldval >> 7) & 0x1)
+		val32 |= BIT(25);
+	rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+	if (tx_only) {
+		dev_dbg(&priv->udev->dev, "%s: only TX\n", __func__);
+		return;
+	}
+
+	reg = result[candidate][6];
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE);
+	val32 &= ~0x3ff;
+	val32 |= (reg & 0x3ff);
+	rtl8xxxu_write32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE, val32);
+
+	reg = result[candidate][7] & 0x3f;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE);
+	val32 &= ~0xfc00;
+	val32 |= ((reg << 10) & 0xfc00);
+	rtl8xxxu_write32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE, val32);
+
+	reg = (result[candidate][7] >> 6) & 0xf;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_AGCR_SSI_TABLE);
+	val32 &= ~0x0000f000;
+	val32 |= (reg << 12);
+	rtl8xxxu_write32(priv, REG_OFDM0_AGCR_SSI_TABLE, val32);
+}
+
+#define MAX_TOLERANCE		5
+
+static bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
+					int result[][8], int c1, int c2)
+{
+	u32 i, j, diff, simubitmap, bound = 0;
+	int candidate[2] = {-1, -1};	/* for path A and path B */
+	bool retval = true;
+
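+	/*
+	 * Compare two calibration runs entry by entry; entries 0-3 are
+	 * path A, entries 4-7 path B. The results are only accepted when
+	 * the two runs agree within MAX_TOLERANCE.
+	 */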
+	if (priv->tx_paths > 1)
+		bound = 8;
+	else
+		bound = 4;
+
+	simubitmap = 0;
+
+	for (i = 0; i < bound; i++) {
+		diff = (result[c1][i] > result[c2][i]) ?
+			(result[c1][i] - result[c2][i]) :
+			(result[c2][i] - result[c1][i]);
+		if (diff > MAX_TOLERANCE) {
+			if ((i == 2 || i == 6) && !simubitmap) {
+				if (result[c1][i] + result[c1][i + 1] == 0)
+					candidate[(i / 4)] = c2;
+				else if (result[c2][i] + result[c2][i + 1] == 0)
+					candidate[(i / 4)] = c1;
+				else
+					simubitmap = simubitmap | (1 << i);
+			} else {
+				simubitmap = simubitmap | (1 << i);
+			}
+		}
+	}
+
+	if (simubitmap == 0) {
+		for (i = 0; i < (bound / 4); i++) {
+			if (candidate[i] >= 0) {
+				for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+					result[3][j] = result[candidate[i]][j];
+				retval = false;
+			}
+		}
+		return retval;
+	} else if (!(simubitmap & 0x0f)) {
+		/* path A OK */
+		for (i = 0; i < 4; i++)
+			result[3][i] = result[c1][i];
+	} else if (!(simubitmap & 0xf0) && priv->tx_paths > 1) {
+		/* path B OK */
+		for (i = 4; i < 8; i++)
+			result[3][i] = result[c1][i];
+	}
+
+	return false;
+}
+
+static void
+rtl8xxxu_save_mac_regs(struct rtl8xxxu_priv *priv, const u32 *reg, u32 *backup)
+{
+	int i;
+
+	for (i = 0; i < (RTL8XXXU_MAC_REGS - 1); i++)
+		backup[i] = rtl8xxxu_read8(priv, reg[i]);
+
+	backup[i] = rtl8xxxu_read32(priv, reg[i]);
+}
+
+static void rtl8xxxu_restore_mac_regs(struct rtl8xxxu_priv *priv,
+				      const u32 *reg, u32 *backup)
+{
+	int i;
+
+	for (i = 0; i < (RTL8XXXU_MAC_REGS - 1); i++)
+		rtl8xxxu_write8(priv, reg[i], backup[i]);
+
+	rtl8xxxu_write32(priv, reg[i], backup[i]);
+}
+
+static void rtl8xxxu_save_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+			       u32 *backup, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		backup[i] = rtl8xxxu_read32(priv, regs[i]);
+}
+
+static void rtl8xxxu_restore_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+				  u32 *backup, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		rtl8xxxu_write32(priv, regs[i], backup[i]);
+}
+
+static void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs,
+				  bool path_a_on)
+{
+	u32 path_on;
+	int i;
+
+	path_on = path_a_on ? 0x04db25a4 : 0x0b1b25a4;
+	if (priv->tx_paths == 1) {
+		path_on = 0x0bdb25a0;
+		rtl8xxxu_write32(priv, regs[0], 0x0b1b25a0);
+	} else {
+		rtl8xxxu_write32(priv, regs[0], path_on);
+	}
+
+	for (i = 1; i < RTL8XXXU_ADDA_REGS; i++)
+		rtl8xxxu_write32(priv, regs[i], path_on);
+}
+
+static void rtl8xxxu_mac_calibration(struct rtl8xxxu_priv *priv,
+				     const u32 *regs, u32 *backup)
+{
+	int i = 0;
+
+	rtl8xxxu_write8(priv, regs[i], 0x3f);
+
+	for (i = 1; i < (RTL8XXXU_MAC_REGS - 1); i++)
+		rtl8xxxu_write8(priv, regs[i], (u8)(backup[i] & ~BIT(3)));
+
+	rtl8xxxu_write8(priv, regs[i], (u8)(backup[i] & ~BIT(5)));
+}
+
+static int rtl8xxxu_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+	u32 reg_eac, reg_e94, reg_e9c, reg_ea4, val32;
+	int result = 0;
+
+	/* path-A IQK setting */
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x10008c1f);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x10008c1f);
+	rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82140102);
+
+	val32 = (priv->rf_paths > 1) ? 0x28160202 :
+		/*IS_81xxC_VENDOR_UMC_B_CUT(pHalData->VersionID)?0x28160202: */
+		0x28160502;
+	rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, val32);
+
+	/* path-B IQK setting */
+	if (priv->rf_paths > 1) {
+		rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x10008c22);
+		rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x10008c22);
+		rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82140102);
+		rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28160202);
+	}
+
+	/* LO calibration setting */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x001028d1);
+
+	/* One shot, path A LOK & IQK */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+	mdelay(1);
+
+	/* Check failed */
+	reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+	reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+	reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+	reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+	if (!(reg_eac & BIT(28)) &&
+	    ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+	    ((reg_e9c & 0x03ff0000) != 0x00420000))
+		result |= 0x01;
+	else	/* If TX not OK, ignore RX */
+		goto out;
+
+	/* If TX is OK, check whether RX is OK */
+	if (!(reg_eac & BIT(27)) &&
+	    ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+	    ((reg_eac & 0x03ff0000) != 0x00360000))
+		result |= 0x02;
+	else
+		dev_warn(&priv->udev->dev, "%s: Path A RX IQK failed!\n",
+			 __func__);
+out:
+	return result;
+}
+
+static int rtl8xxxu_iqk_path_b(struct rtl8xxxu_priv *priv)
+{
+	u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+	int result = 0;
+
+	/* One shot, path B LOK & IQK */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000002);
+	rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000000);
+
+	mdelay(1);
+
+	/* Check failed */
+	reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+	reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+	reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+	reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
+	reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+
+	if (!(reg_eac & BIT(31)) &&
+	    ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
+	    ((reg_ebc & 0x03ff0000) != 0x00420000))
+		result |= 0x01;
+	else
+		goto out;
+
+	if (!(reg_eac & BIT(30)) &&
+	    (((reg_ec4 & 0x03ff0000) >> 16) != 0x132) &&
+	    (((reg_ecc & 0x03ff0000) >> 16) != 0x36))
+		result |= 0x02;
+	else
+		dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n",
+			 __func__);
+out:
+	return result;
+}
+
+static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+				     int result[][8], int t)
+{
+	struct device *dev = &priv->udev->dev;
+	u32 i, val32;
+	int path_a_ok, path_b_ok;
+	int retry = 2;
+	const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+		REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+		REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+		REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+		REG_TX_OFDM_BBON, REG_TX_TO_RX,
+		REG_TX_TO_TX, REG_RX_CCK,
+		REG_RX_OFDM, REG_RX_WAIT_RIFS,
+		REG_RX_TO_RX, REG_STANDBY,
+		REG_SLEEP, REG_PMPD_ANAEN
+	};
+	const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+		REG_TXPAUSE, REG_BEACON_CTRL,
+		REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+	};
+	const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+		REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+		REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+		REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+		REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE
+	};
+
+	/*
+	 * Note: IQ calibration must be performed after loading
+	 *       PHY_REG.txt, radio_a.txt and radio_b.txt
+	 */
+
+	if (t == 0) {
+		/* Save ADDA parameters, turn Path A ADDA on */
+		rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+				   RTL8XXXU_ADDA_REGS);
+		rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+		rtl8xxxu_save_regs(priv, iqk_bb_regs,
+				   priv->bb_backup, RTL8XXXU_BB_REGS);
+	}
+
+	rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+	if (t == 0) {
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1);
+		if (val32 & FPGA0_HSSI_PARM1_PI)
+			priv->pi_enabled = 1;
+	}
+
+	if (!priv->pi_enabled) {
+		/* Switch BB to PI mode to do IQ Calibration. */
+		rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100);
+		rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, 0x01000100);
+	}
+
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+	val32 &= ~FPGA_RF_MODE_CCK;
+	rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+	rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+	rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+	rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
+
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+	val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+	rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
+	val32 &= ~BIT(10);
+	rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32);
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE);
+	val32 &= ~BIT(10);
+	rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32);
+
+	if (priv->tx_paths > 1) {
+		rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00010000);
+		rtl8xxxu_write32(priv, REG_FPGA0_XB_LSSI_PARM, 0x00010000);
+	}
+
+	/* MAC settings */
+	rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+
+	/* Page B init */
+	rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x00080000);
+
+	if (priv->tx_paths > 1)
+		rtl8xxxu_write32(priv, REG_CONFIG_ANT_B, 0x00080000);
+
+	/* IQ calibration setting */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+	rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+	rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+	for (i = 0; i < retry; i++) {
+		path_a_ok = rtl8xxxu_iqk_path_a(priv);
+		if (path_a_ok == 0x03) {
+			val32 = rtl8xxxu_read32(priv,
+						REG_TX_POWER_BEFORE_IQK_A);
+			result[t][0] = (val32 >> 16) & 0x3ff;
+			val32 = rtl8xxxu_read32(priv,
+						REG_TX_POWER_AFTER_IQK_A);
+			result[t][1] = (val32 >> 16) & 0x3ff;
+			val32 = rtl8xxxu_read32(priv,
+						REG_RX_POWER_BEFORE_IQK_A_2);
+			result[t][2] = (val32 >> 16) & 0x3ff;
+			val32 = rtl8xxxu_read32(priv,
+						REG_RX_POWER_AFTER_IQK_A_2);
+			result[t][3] = (val32 >> 16) & 0x3ff;
+			break;
+		} else if (i == (retry - 1) && path_a_ok == 0x01) {
+			/* TX IQK OK */
+			dev_dbg(dev, "%s: Path A IQK Only Tx Success!!\n",
+				__func__);
+
+			val32 = rtl8xxxu_read32(priv,
+						REG_TX_POWER_BEFORE_IQK_A);
+			result[t][0] = (val32 >> 16) & 0x3ff;
+			val32 = rtl8xxxu_read32(priv,
+						REG_TX_POWER_AFTER_IQK_A);
+			result[t][1] = (val32 >> 16) & 0x3ff;
+		}
+	}
+
+	if (!path_a_ok)
+		dev_dbg(dev, "%s: Path A IQK failed!\n", __func__);
+
+	if (priv->tx_paths > 1) {
+		/*
+		 * Path A into standby
+		 */
+		rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x0);
+		rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00010000);
+		rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+		/* Turn Path B ADDA on */
+		rtl8xxxu_path_adda_on(priv, adda_regs, false);
+
+		for (i = 0; i < retry; i++) {
+			path_b_ok = rtl8xxxu_iqk_path_b(priv);
+			if (path_b_ok == 0x03) {
+				val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+				result[t][4] = (val32 >> 16) & 0x3ff;
+				val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+				result[t][5] = (val32 >> 16) & 0x3ff;
+				val32 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
+				result[t][6] = (val32 >> 16) & 0x3ff;
+				val32 = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+				result[t][7] = (val32 >> 16) & 0x3ff;
+				break;
+			} else if (i == (retry - 1) && path_b_ok == 0x01) {
+				/* TX IQK OK */
+				val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+				result[t][4] = (val32 >> 16) & 0x3ff;
+				val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+				result[t][5] = (val32 >> 16) & 0x3ff;
+			}
+		}
+
+		if (!path_b_ok)
+			dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
+	}
+
+	/* Back to BB mode, load original value */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0);
+
+	if (t) {
+		if (!priv->pi_enabled) {
+			/*
+			 * Switch back BB to SI mode after finishing
+			 * IQ Calibration
+			 */
+			val32 = 0x01000000;
+			rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, val32);
+			rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, val32);
+		}
+
+		/* Reload ADDA power saving parameters */
+		rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+				      RTL8XXXU_ADDA_REGS);
+
+		/* Reload MAC parameters */
+		rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+		/* Reload BB parameters */
+		rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+				      priv->bb_backup, RTL8XXXU_BB_REGS);
+
+		/* Restore RX initial gain */
+		rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00032ed3);
+
+		if (priv->tx_paths > 1) {
+			rtl8xxxu_write32(priv, REG_FPGA0_XB_LSSI_PARM,
+					 0x00032ed3);
+		}
+
+		/* Load 0xe30 IQC default value */
+		rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+		rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+	}
+}
+
+static void rtl8723a_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+	struct device *dev = &priv->udev->dev;
+	int result[4][8];	/* last is final result */
+	int i, candidate;
+	bool path_a_ok, path_b_ok;
+	u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+	u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+	s32 reg_tmp = 0;
+	bool simu;
+
+	memset(result, 0, sizeof(result));
+	candidate = -1;
+
+	path_a_ok = false;
+	path_b_ok = false;
+
+	rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+
+	for (i = 0; i < 3; i++) {
+		rtl8xxxu_phy_iqcalibrate(priv, result, i);
+
+		if (i == 1) {
+			simu = rtl8xxxu_simularity_compare(priv, result, 0, 1);
+			if (simu) {
+				candidate = 0;
+				break;
+			}
+		}
+
+		if (i == 2) {
+			simu = rtl8xxxu_simularity_compare(priv, result, 0, 2);
+			if (simu) {
+				candidate = 0;
+				break;
+			}
+
+			simu = rtl8xxxu_simularity_compare(priv, result, 1, 2);
+			if (simu) {
+				candidate = 1;
+			} else {
+				for (i = 0; i < 8; i++)
+					reg_tmp += result[3][i];
+
+				if (reg_tmp)
+					candidate = 3;
+				else
+					candidate = -1;
+			}
+		}
+	}
+
+	for (i = 0; i < 4; i++) {
+		reg_e94 = result[i][0];
+		reg_e9c = result[i][1];
+		reg_ea4 = result[i][2];
+		reg_eac = result[i][3];
+		reg_eb4 = result[i][4];
+		reg_ebc = result[i][5];
+		reg_ec4 = result[i][6];
+		reg_ecc = result[i][7];
+	}
+
+	if (candidate >= 0) {
+		reg_e94 = result[candidate][0];
+		priv->rege94 =  reg_e94;
+		reg_e9c = result[candidate][1];
+		priv->rege9c = reg_e9c;
+		reg_ea4 = result[candidate][2];
+		reg_eac = result[candidate][3];
+		reg_eb4 = result[candidate][4];
+		priv->regeb4 = reg_eb4;
+		reg_ebc = result[candidate][5];
+		priv->regebc = reg_ebc;
+		reg_ec4 = result[candidate][6];
+		reg_ecc = result[candidate][7];
+		dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+		dev_dbg(dev,
+			"%s: e94=%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x "
+			"ecc=%x\n", __func__, reg_e94, reg_e9c,
+			reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+		path_a_ok = true;
+		path_b_ok = true;
+	} else {
+		reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
+		reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
+	}
+
+	if (reg_e94 && candidate >= 0)
+		rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+					   candidate, (reg_ea4 == 0));
+
+	if (priv->tx_paths > 1 && reg_eb4)
+		rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
+					   candidate, (reg_ec4 == 0));
+
+	rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+			   priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+}
+
+static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
+{
+	u32 val32;
+	u32 rf_amode, rf_bmode = 0, lstf;
+
+	/* Check continuous TX and Packet TX */
+	lstf = rtl8xxxu_read32(priv, REG_OFDM1_LSTF);
+
+	if (lstf & OFDM_LSTF_MASK) {
+		/* Disable all continuous TX */
+		val32 = lstf & ~OFDM_LSTF_MASK;
+		rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32);
+
+		/* Read original RF mode Path A */
+		rf_amode = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_AC);
+
+		/* Set RF mode to standby Path A */
+		rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC,
+				     (rf_amode & 0x8ffff) | 0x10000);
+
+		/* Path-B */
+		if (priv->tx_paths > 1) {
+			rf_bmode = rtl8xxxu_read_rfreg(priv, RF_B,
+						       RF6052_REG_AC);
+
+			rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC,
+					     (rf_bmode & 0x8ffff) | 0x10000);
+		}
+	} else {
+		/*  Deal with Packet TX case */
+		/*  block all queues */
+		rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+	}
+
+	/* Start LC calibration */
+	val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG);
+	val32 |= 0x08000;
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32);
+
+	msleep(100);
+
+	/* Restore original parameters */
+	if (lstf & OFDM_LSTF_MASK) {
+		/* Path-A */
+		rtl8xxxu_write32(priv, REG_OFDM1_LSTF, lstf);
+		rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, rf_amode);
+
+		/* Path-B */
+		if (priv->tx_paths > 1)
+			rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC,
+					     rf_bmode);
+	} else /*  Deal with Packet TX case */
+		rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+}
+
+static int rtl8xxxu_set_mac(struct rtl8xxxu_priv *priv)
+{
+	int i;
+	u16 reg;
+
+	reg = REG_MACID;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		rtl8xxxu_write8(priv, reg + i, priv->mac_addr[i]);
+
+	return 0;
+}
+
+static int rtl8xxxu_set_bssid(struct rtl8xxxu_priv *priv, const u8 *bssid)
+{
+	int i;
+	u16 reg;
+
+	dev_dbg(&priv->udev->dev, "%s: (%pM)\n", __func__, bssid);
+
+	reg = REG_BSSID;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		rtl8xxxu_write8(priv, reg + i, bssid[i]);
+
+	return 0;
+}
+
+static void
+rtl8xxxu_set_ampdu_factor(struct rtl8xxxu_priv *priv, u8 ampdu_factor)
+{
+	u8 vals[4] = { 0x41, 0xa8, 0x72, 0xb9 };
+	u8 max_agg = 0xf;
+	int i;
+
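+	/*
+	 * Clamp both nibbles of each REG_AGGLEN_LMT byte to the requested
+	 * aggregation limit.
+	 */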
+	ampdu_factor = 1 << (ampdu_factor + 2);
+	if (ampdu_factor > max_agg)
+		ampdu_factor = max_agg;
+
+	for (i = 0; i < 4; i++) {
+		if ((vals[i] & 0xf0) > (ampdu_factor << 4))
+			vals[i] = (vals[i] & 0x0f) | (ampdu_factor << 4);
+
+		if ((vals[i] & 0x0f) > ampdu_factor)
+			vals[i] = (vals[i] & 0xf0) | ampdu_factor;
+
+		rtl8xxxu_write8(priv, REG_AGGLEN_LMT + i, vals[i]);
+	}
+}
+
+static void rtl8xxxu_set_ampdu_min_space(struct rtl8xxxu_priv *priv, u8 density)
+{
+	u8 val8;
+
+	val8 = rtl8xxxu_read8(priv, REG_AMPDU_MIN_SPACE);
+	val8 &= 0xf8;
+	val8 |= density;
+	rtl8xxxu_write8(priv, REG_AMPDU_MIN_SPACE, val8);
+}
+
+static int rtl8xxxu_active_to_emu(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	int count, ret = 0;
+
+	/* Start of rtl8723AU_card_enable_flow */
+	/* Act to Cardemu sequence*/
+	/* Turn off RF */
+	rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+
+	/* 0x004E[7] = 0, switch DPDT_SEL_P output from register 0x0065[2] */
+	val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+	val8 &= ~LEDCFG2_DPDT_SELECT;
+	rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+	/* 0x0005[1] = 1 turn off MAC by HW state machine*/
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 |= BIT(1);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+		val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+		if ((val8 & BIT(1)) == 0)
+			break;
+		udelay(10);
+	}
+
+	if (!count) {
+		dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n",
+			 __func__);
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	/* 0x0000[5] = 1 analog Ips to digital, 1:isolation */
+	val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+	val8 |= SYS_ISO_ANALOG_IPS;
+	rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+	/* 0x0020[0] = 0 disable LDOA12 MACRO block*/
+	val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+	val8 &= ~LDOA15_ENABLE;
+	rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+exit:
+	return ret;
+}
+
+static int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u32 val32;
+	int count, ret = 0;
+
+	rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+	/*
+	 * Poll - wait for RX packet to complete
+	 */
+	for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+		val32 = rtl8xxxu_read32(priv, 0x5f8);
+		if (!val32)
+			break;
+		udelay(10);
+	}
+
+	if (!count) {
+		dev_warn(&priv->udev->dev,
+			 "%s: RX poll timed out (0x05f8)\n", __func__);
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	/* Disable CCK and OFDM, clock gated */
+	val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+	val8 &= ~SYS_FUNC_BBRSTB;
+	rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+	udelay(2);
+
+	/* Reset baseband */
+	val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+	val8 &= ~SYS_FUNC_BB_GLB_RSTN;
+	rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+	/* Reset MAC TRX */
+	val8 = rtl8xxxu_read8(priv, REG_CR);
+	val8 = CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE;
+	rtl8xxxu_write8(priv, REG_CR, val8);
+
+	/* Reset MAC TRX */
+	val8 = rtl8xxxu_read8(priv, REG_CR + 1);
+	val8 &= ~BIT(1); /* CR_SECURITY_ENABLE */
+	rtl8xxxu_write8(priv, REG_CR + 1, val8);
+
+	/* Respond TX OK to scheduler */
+	val8 = rtl8xxxu_read8(priv, REG_DUAL_TSF_RST);
+	val8 |= DUAL_TSF_TX_OK;
+	rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, val8);
+
+exit:
+	return ret;
+}
+
+static void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+
+	/* Clear suspend enable and power down enable*/
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 &= ~(BIT(3) | BIT(7));
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* 0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/
+	val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2);
+	val8 &= ~BIT(0);
+	rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8);
+
+	/* 0x04[12:11] = 00 disable WL suspend */
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 &= ~(BIT(3) | BIT(4));
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+}
+
+static int rtl8xxxu_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u32 val32;
+	int count, ret = 0;
+
+	/* 0x20[0] = 1 enable LDOA12 MACRO block for all interface*/
+	val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+	val8 |= LDOA15_ENABLE;
+	rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+	/* 0x67[0] = 0 to disable BT_GPS_SEL pins*/
+	val8 = rtl8xxxu_read8(priv, 0x0067);
+	val8 &= ~BIT(4);
+	rtl8xxxu_write8(priv, 0x0067, val8);
+
+	mdelay(1);
+
+	/* 0x00[5] = 0 release analog Ips to digital, 1:isolation */
+	val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+	val8 &= ~SYS_ISO_ANALOG_IPS;
+	rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+	/* disable SW LPS 0x04[10]= 0 */
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 &= ~BIT(2);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* wait till 0x04[17] = 1 power ready*/
+	for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+		val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+		if (val32 & BIT(17))
+			break;
+
+		udelay(10);
+	}
+
+	if (!count) {
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	/* We should be able to optimize the following three entries into one */
+
+	/* release WLON reset 0x04[16]= 1*/
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+	val8 |= BIT(0);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+	/* disable HWPDN 0x04[15]= 0*/
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 &= ~BIT(7);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* disable WL suspend*/
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 &= ~(BIT(3) | BIT(4));
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* set, then poll until 0 */
+	val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+	val32 |= APS_FSMCO_MAC_ENABLE;
+	rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+	for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+		val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+		if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+			ret = 0;
+			break;
+		}
+		udelay(10);
+	}
+
+	if (!count) {
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	/* 0x4C[23] = 0x4E[7] = 1, switch DPDT_SEL_P output from WL BB */
+	/*
+	 * Note: Vendor driver actually clears this bit, despite the
+	 * documentation claims it's being set!
+	 */
+	val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+	val8 |= LEDCFG2_DPDT_SELECT;
+	val8 &= ~LEDCFG2_DPDT_SELECT;
+	rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+exit:
+	return ret;
+}
+
+static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+
+	/* 0x0007[7:0] = 0x20 SOP option to disable BG/MB */
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 3, 0x20);
+
+	/* 0x04[12:11] = 01 enable WL suspend */
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 &= ~BIT(4);
+	val8 |= BIT(3);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 |= BIT(7);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* 0x48[16] = 1 to enable GPIO9 as EXT wakeup */
+	val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2);
+	val8 |= BIT(0);
+	rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8);
+
+	return 0;
+}
+
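+/*
+ * Full power-on sequence for the 8723au: unlock the ISO/CLK/Power control
+ * register, walk the disabled -> emu -> active transitions, reset the
+ * 8051 and enable the MAC DMA/WMAC/SCHEDULE/SEC blocks.
+ */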
+static int rtl8723au_power_on(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u16 val16;
+	u32 val32;
+	int ret;
+
+	/*
+	 * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
+	 */
+	rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
+
+	rtl8xxxu_disabled_to_emu(priv);
+
+	ret = rtl8xxxu_emu_to_active(priv);
+	if (ret)
+		goto exit;
+
+	/*
+	 * 0x0004[19] = 1, reset 8051
+	 */
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+	val8 |= BIT(3);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+	/*
+	 * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+	 * Set CR bit10 to enable 32k calibration.
+	 */
+	val16 = rtl8xxxu_read16(priv, REG_CR);
+	val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+		  CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+		  CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+		  CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
+		  CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+	rtl8xxxu_write16(priv, REG_CR, val16);
+
+	/* For EFuse PG */
+	val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+	val32 &= ~(BIT(28) | BIT(29) | BIT(30));
+	val32 |= (0x06 << 28);
+	rtl8xxxu_write32(priv, REG_EFUSE_CTRL, val32);
+exit:
+	return ret;
+}
+
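+/*
+ * Power-on sequence for the 8192cu family: wait for the power FSM, enable
+ * LDOV12D if needed, auto-enable the WLAN MAC, release RF digital
+ * isolation and bring up the MAC blocks. Also applies the 8188RU
+ * high-PA LNA power leakage workaround.
+ */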
+static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u16 val16;
+	u32 val32;
+	int i;
+
+	for (i = 100; i; i--) {
+		val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO);
+		if (val8 & APS_FSMCO_PFM_ALDN)
+			break;
+	}
+
+	if (!i) {
+		pr_info("%s: Poll failed\n", __func__);
+		return -ENODEV;
+	}
+
+	/*
+	 * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
+	 */
+	rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
+	rtl8xxxu_write8(priv, REG_SPS0_CTRL, 0x2b);
+	udelay(100);
+
+	val8 = rtl8xxxu_read8(priv, REG_LDOV12D_CTRL);
+	if (!(val8 & LDOV12D_ENABLE)) {
+		pr_info("%s: Enabling LDOV12D (%02x)\n", __func__, val8);
+		val8 |= LDOV12D_ENABLE;
+		rtl8xxxu_write8(priv, REG_LDOV12D_CTRL, val8);
+
+		udelay(100);
+
+		val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+		val8 &= ~SYS_ISO_MD2PP;
+		rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+	}
+
+	/*
+	 * Auto enable WLAN
+	 */
+	val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+	val16 |= APS_FSMCO_MAC_ENABLE;
+	rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+	for (i = 1000; i; i--) {
+		val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+		if (!(val16 & APS_FSMCO_MAC_ENABLE))
+			break;
+	}
+	if (!i) {
+		pr_info("%s: FSMCO_MAC_ENABLE poll failed\n", __func__);
+		return -EBUSY;
+	}
+
+	/*
+	 * Enable radio, GPIO, LED
+	 */
+	val16 = APS_FSMCO_HW_SUSPEND | APS_FSMCO_ENABLE_POWERDOWN |
+		APS_FSMCO_PFM_ALDN;
+	rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+	/*
+	 * Release RF digital isolation
+	 */
+	val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL);
+	val16 &= ~SYS_ISO_DIOR;
+	rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16);
+
+	val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
+	val8 &= ~APSD_CTRL_OFF;
+	rtl8xxxu_write8(priv, REG_APSD_CTRL, val8);
+	for (i = 200; i; i--) {
+		val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
+		if (!(val8 & APSD_CTRL_OFF_STATUS))
+			break;
+	}
+
+	if (!i) {
+		pr_info("%s: APSD_CTRL poll failed\n", __func__);
+		return -EBUSY;
+	}
+
+	/*
+	 * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+	 */
+	val16 = rtl8xxxu_read16(priv, REG_CR);
+	val16 |= CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+		CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE |
+		CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE;
+	rtl8xxxu_write16(priv, REG_CR, val16);
+
+	/*
+	 * Workaround for 8188RU LNA power leakage problem.
+	 */
+	if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
+		val32 &= ~BIT(1);
+		rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
+	}
+	return 0;
+}
+
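+/*
+ * Power-down sequence: undo the 8188RU LNA workaround, turn off the RF,
+ * reset the firmware and MCU, and step the chip back down through LPS
+ * and emu into the disabled state before re-locking the ISO/CLK/Power
+ * control register.
+ */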
+static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u16 val16;
+	u32 val32;
+
+	/*
+	 * Workaround for 8188RU LNA power leakage problem.
+	 */
+	if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
+		val32 |= BIT(1);
+		rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
+	}
+
+	rtl8xxxu_active_to_lps(priv);
+
+	/* Turn off RF */
+	rtl8xxxu_write8(priv, REG_RF_CTRL, 0x00);
+
+	/* Reset Firmware if running in RAM */
+	if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
+		rtl8xxxu_firmware_self_reset(priv);
+
+	/* Reset MCU */
+	val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+	val16 &= ~SYS_FUNC_CPU_ENABLE;
+	rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+	/* Reset MCU ready status */
+	rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+
+	rtl8xxxu_active_to_emu(priv);
+	rtl8xxxu_emu_to_disabled(priv);
+
+	/* Reset MCU IO Wrapper */
+	val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+	val8 &= ~BIT(0);
+	rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+	val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+	val8 |= BIT(0);
+	rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+	/* RSV_CTRL 0x1C[7:0] = 0x0e  lock ISO/CLK/Power control register */
+	rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e);
+}
+
+static void rtl8xxxu_init_bt(struct rtl8xxxu_priv *priv)
+{
+	if (!priv->has_bluetooth)
+		return;
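+
+	/* BT coexistence configuration is not implemented yet */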
+}
+
+static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct device *dev = &priv->udev->dev;
+	struct rtl8xxxu_rfregval *rftable;
+	bool macpower;
+	int ret;
+	u8 val8;
+	u16 val16;
+	u32 val32;
+
+	/* Check if MAC is already powered on */
+	val8 = rtl8xxxu_read8(priv, REG_CR);
+
+	/*
+	 * Fix for the 92DU-VC S3 hang: the root cause is that the secondary
+	 * MAC is not initialized. The first MAC returns 0xea, the second
+	 * MAC returns 0x00.
+	 */
+	if (val8 == 0xea)
+		macpower = false;
+	else
+		macpower = true;
+
+	ret = priv->fops->power_on(priv);
+	if (ret < 0) {
+		dev_warn(dev, "%s: Failed power on\n", __func__);
+		goto exit;
+	}
+
+	dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
+	if (!macpower) {
+		ret = rtl8xxxu_init_llt_table(priv, TX_TOTAL_PAGE_NUM);
+		if (ret) {
+			dev_warn(dev, "%s: LLT table init failed\n", __func__);
+			goto exit;
+		}
+	}
+
+	ret = rtl8xxxu_download_firmware(priv);
+	dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+	ret = rtl8xxxu_start_firmware(priv);
+	dev_dbg(dev, "%s: start_firmware %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+
+	ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table);
+	dev_dbg(dev, "%s: init_mac %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+
+	ret = rtl8xxxu_init_phy_bb(priv);
+	dev_dbg(dev, "%s: init_phy_bb %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+
+	switch (priv->rtlchip) {
+	case 0x8723a:
+		rftable = rtl8723au_radioa_1t_init_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+		break;
+	case 0x8188c:
+		if (priv->hi_pa)
+			rftable = rtl8188ru_radioa_1t_highpa_table;
+		else
+			rftable = rtl8192cu_radioa_1t_init_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+		break;
+	case 0x8191c:
+		rftable = rtl8192cu_radioa_1t_init_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+		break;
+	case 0x8192c:
+		rftable = rtl8192cu_radioa_2t_init_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+		if (ret)
+			break;
+		rftable = rtl8192cu_radiob_2t_init_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		goto exit;
+
+	/* Reduce 80M spur */
+	rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
+	rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+	rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
+	rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+
+	/* RFSW Control - clear bit 14 ?? */
+	rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
+	/* 0x07000760 */
+	val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
+		FPGA0_RF_ANTSWB | FPGA0_RF_PAPE |
+		((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) <<
+		 FPGA0_RF_BD_CTRL_SHIFT);
+	rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+	/* 0x860[6:5]= 00 - why? - this sets antenna B */
+	rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66F60210);
+
+	priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A,
+						  RF6052_REG_MODE_AG);
+
+	dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
+	if (!macpower) {
+		if (priv->ep_tx_normal_queue)
+			val8 = TX_PAGE_NUM_NORM_PQ;
+		else
+			val8 = 0;
+
+		rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
+
+		val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD;
+
+		if (priv->ep_tx_high_queue)
+			val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
+		if (priv->ep_tx_low_queue)
+			val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
+
+		rtl8xxxu_write32(priv, REG_RQPN, val32);
+
+		/*
+		 * Set TX buffer boundary
+		 */
+		val8 = TX_TOTAL_PAGE_NUM + 1;
+		rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
+		rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8);
+		rtl8xxxu_write8(priv, REG_TXPKTBUF_WMAC_LBK_BF_HD, val8);
+		rtl8xxxu_write8(priv, REG_TRXFF_BNDY, val8);
+		rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8);
+	}
+
+	ret = rtl8xxxu_init_queue_priority(priv);
+	dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+
+	/*
+	 * Set RX page boundary
+	 */
+	rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff);
+	/*
+	 * Transfer page size is always 128
+	 */
+	val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) |
+		(PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT);
+	rtl8xxxu_write8(priv, REG_PBP, val8);
+
+	/*
+	 * Unit in 8 bytes, not obvious what it is used for
+	 */
+	rtl8xxxu_write8(priv, REG_RX_DRVINFO_SZ, 4);
+
+	/*
+	 * Enable all interrupts - not obvious USB needs to do this
+	 */
+	rtl8xxxu_write32(priv, REG_HISR, 0xffffffff);
+	rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
+
+	rtl8xxxu_set_mac(priv);
+	rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION);
+
+	/*
+	 * Configure initial WMAC settings
+	 */
+	val32 = RCR_ACCEPT_PHYS_MATCH | RCR_ACCEPT_MCAST | RCR_ACCEPT_BCAST |
+		/* RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON | */
+		RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL |
+		RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC;
+	rtl8xxxu_write32(priv, REG_RCR, val32);
+
+	/*
+	 * Accept all multicast
+	 */
+	rtl8xxxu_write32(priv, REG_MAR, 0xffffffff);
+	rtl8xxxu_write32(priv, REG_MAR + 4, 0xffffffff);
+
+	/*
+	 * Init adaptive controls
+	 */
+	val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+	val32 &= ~RESPONSE_RATE_BITMAP_ALL;
+	val32 |= RESPONSE_RATE_RRSR_CCK_ONLY_1M;
+	rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
+
+	/* CCK = 0x0a, OFDM = 0x10 */
+	rtl8xxxu_set_spec_sifs(priv, 0x10, 0x10);
+	rtl8xxxu_set_retry(priv, 0x30, 0x30);
+	rtl8xxxu_set_spec_sifs(priv, 0x0a, 0x10);
+
+	/*
+	 * Init EDCA
+	 */
+	rtl8xxxu_write16(priv, REG_MAC_SPEC_SIFS, 0x100a);
+
+	/* Set CCK SIFS */
+	rtl8xxxu_write16(priv, REG_SIFS_CCK, 0x100a);
+
+	/* Set OFDM SIFS */
+	rtl8xxxu_write16(priv, REG_SIFS_OFDM, 0x100a);
+
+	/* TXOP */
+	rtl8xxxu_write32(priv, REG_EDCA_BE_PARAM, 0x005ea42b);
+	rtl8xxxu_write32(priv, REG_EDCA_BK_PARAM, 0x0000a44f);
+	rtl8xxxu_write32(priv, REG_EDCA_VI_PARAM, 0x005ea324);
+	rtl8xxxu_write32(priv, REG_EDCA_VO_PARAM, 0x002fa226);
+
+	/* Set data auto rate fallback retry count */
+	rtl8xxxu_write32(priv, REG_DARFRC, 0x00000000);
+	rtl8xxxu_write32(priv, REG_DARFRC + 4, 0x10080404);
+	rtl8xxxu_write32(priv, REG_RARFRC, 0x04030201);
+	rtl8xxxu_write32(priv, REG_RARFRC + 4, 0x08070605);
+
+	val8 = rtl8xxxu_read8(priv, REG_FWHW_TXQ_CTRL);
+	val8 |= FWHW_TXQ_CTRL_AMPDU_RETRY;
+	rtl8xxxu_write8(priv, REG_FWHW_TXQ_CTRL, val8);
+
+	/*  Set ACK timeout */
+	rtl8xxxu_write8(priv, REG_ACKTO, 0x40);
+
+	/*
+	 * Initialize beacon parameters
+	 */
+	val16 = BEACON_DISABLE_TSF_UPDATE | (BEACON_DISABLE_TSF_UPDATE << 8);
+	rtl8xxxu_write16(priv, REG_BEACON_CTRL, val16);
+	rtl8xxxu_write16(priv, REG_TBTT_PROHIBIT, 0x6404);
+	rtl8xxxu_write8(priv, REG_DRIVER_EARLY_INT, DRIVER_EARLY_INT_TIME);
+	rtl8xxxu_write8(priv, REG_BEACON_DMA_TIME, BEACON_DMA_ATIME_INT_TIME);
+	rtl8xxxu_write16(priv, REG_BEACON_TCFG, 0x660F);
+
+	/*
+	 * Enable CCK and OFDM block
+	 */
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+	val32 |= (FPGA_RF_MODE_CCK | FPGA_RF_MODE_OFDM);
+	rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+	/*
+	 * Invalidate all CAM entries - bit 30 is undocumented
+	 */
+	rtl8xxxu_write32(priv, REG_CAM_CMD, CAM_CMD_POLLING | BIT(30));
+
+	/*
+	 * Start out with default power levels for channel 6, 20MHz
+	 */
+	rtl8723a_set_tx_power(priv, 1, false);
+
+	/* Let the 8051 take control of antenna setting */
+	val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+	val8 |= LEDCFG2_DPDT_SELECT;
+	rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+	rtl8xxxu_write8(priv, REG_HWSEQ_CTRL, 0xff);
+
+	/* Disable BAR - not sure if this has any effect on USB */
+	rtl8xxxu_write32(priv, REG_BAR_MODE_CTRL, 0x0201ffff);
+
+	rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0);
+
+	/*
+	 * Not sure if we should get into this at all
+	 */
+	if (priv->iqk_initialized) {
+		rtl8xxxu_restore_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+				      priv->bb_recovery_backup,
+				      RTL8XXXU_BB_REGS);
+	} else {
+		rtl8723a_phy_iq_calibrate(priv);
+		priv->iqk_initialized = true;
+	}
+
+	/*
+	 * This should enable thermal meter
+	 */
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER, 0x60);
+
+	rtl8723a_phy_lc_calibrate(priv);
+
+	/* fix USB interface interference issue */
+	rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+	rtl8xxxu_write8(priv, 0xfe41, 0x8d);
+	rtl8xxxu_write8(priv, 0xfe42, 0x80);
+	rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
+
+	/* Solve too many protocol error on USB bus */
+	/* Can't do this for 8188/8192 UMC A cut parts */
+	rtl8xxxu_write8(priv, 0xfe40, 0xe6);
+	rtl8xxxu_write8(priv, 0xfe41, 0x94);
+	rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+	rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+	rtl8xxxu_write8(priv, 0xfe41, 0x19);
+	rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+	rtl8xxxu_write8(priv, 0xfe40, 0xe5);
+	rtl8xxxu_write8(priv, 0xfe41, 0x91);
+	rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+	rtl8xxxu_write8(priv, 0xfe40, 0xe2);
+	rtl8xxxu_write8(priv, 0xfe41, 0x81);
+	rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+	/* Init BT hw config. */
+	rtl8xxxu_init_bt(priv);
+
+	/*
+	 * Not sure if we really need to save these parameters, but the
+	 * vendor driver does
+	 */
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2);
+	if (val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR)
+		priv->path_a_hi_power = 1;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+	priv->path_a_rf_paths = val32 & OFDM_RF_PATH_RX_MASK;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+	priv->path_a_ig_value = val32 & OFDM0_X_AGC_CORE1_IGI_MASK;
+
+	/* Set NAV_UPPER to 30000us */
+	val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT);
+	rtl8xxxu_write8(priv, REG_NAV_UPPER, val8);
+
+	/*
+	 * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test,
+	 * but we need to find the root cause.
+	 */
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+	if ((val32 & 0xff000000) != 0x83000000) {
+		val32 |= FPGA_RF_MODE_CCK;
+		rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+	}
+
+	val32 = rtl8xxxu_read32(priv, REG_FWHW_TXQ_CTRL);
+	val32 |= FWHW_TXQ_CTRL_XMIT_MGMT_ACK;
+	/* ack for xmit mgmt frames. */
+	rtl8xxxu_write32(priv, REG_FWHW_TXQ_CTRL, val32);
+
+exit:
+	return ret;
+}
+
+static void rtl8xxxu_disable_device(struct ieee80211_hw *hw)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+
+	rtl8xxxu_power_off(priv);
+}
+
+static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
+			       struct ieee80211_key_conf *key, const u8 *mac)
+{
+	u32 cmd, val32, addr, ctrl;
+	int j, i, tmp_debug;
+
+	tmp_debug = rtl8xxxu_debug;
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_KEY)
+		rtl8xxxu_debug |= RTL8XXXU_DEBUG_REG_WRITE;
+
+	/*
+	 * This is a bit of a hack - the lower bits of the cipher
+	 * suite selector happen to match the cipher index in the CAM.
+	 */
+	addr = key->keyidx << CAM_CMD_KEY_SHIFT;
+	ctrl = (key->cipher & 0x0f) << 2 | key->keyidx | CAM_WRITE_VALID;
+
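+	/*
+	 * A CAM entry is written as six 32-bit words, from the last word
+	 * down to the first: word 0 holds the control field plus the first
+	 * two MAC octets, word 1 the remaining MAC octets, and words 2-5
+	 * the key material.
+	 */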
+	for (j = 5; j >= 0; j--) {
+		switch (j) {
+		case 0:
+			val32 = ctrl | (mac[0] << 16) | (mac[1] << 24);
+			break;
+		case 1:
+			val32 = mac[2] | (mac[3] << 8) |
+				(mac[4] << 16) | (mac[5] << 24);
+			break;
+		default:
+			i = (j - 2) << 2;
+			val32 = key->key[i] | (key->key[i + 1] << 8) |
+				key->key[i + 2] << 16 | key->key[i + 3] << 24;
+			break;
+		}
+
+		rtl8xxxu_write32(priv, REG_CAM_WRITE, val32);
+		cmd = CAM_CMD_POLLING | CAM_CMD_WRITE | (addr + j);
+		rtl8xxxu_write32(priv, REG_CAM_CMD, cmd);
+		udelay(100);
+	}
+
+	rtl8xxxu_debug = tmp_debug;
+}
+
+static void rtl8xxxu_sw_scan_start(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif, const u8* mac)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	u8 val8;
+
+	val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+	val8 |= BEACON_DISABLE_TSF_UPDATE;
+	rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+}
+
+static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	u8 val8;
+
+	val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+	val8 &= ~BEACON_DISABLE_TSF_UPDATE;
+	rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+}
+
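+/*
+ * Hand the station's rate mask to the firmware via an H2C command; the
+ * firmware performs the actual rate adaptation.
+ */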
+static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
+				      u32 ramask, int sgi)
+{
+	struct h2c_cmd h2c;
+
+	h2c.ramask.cmd = H2C_SET_RATE_MASK;
+	h2c.ramask.mask_lo = cpu_to_le16(ramask & 0xffff);
+	h2c.ramask.mask_hi = cpu_to_le16(ramask >> 16);
+
+	h2c.ramask.arg = 0x80;
+	if (sgi)
+		h2c.ramask.arg |= 0x20;
+
+	dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x\n", __func__,
+		ramask, h2c.ramask.arg);
+	rtl8723a_h2c_cmd(priv, &h2c);
+}
+
+static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
+{
+	u32 val32;
+	u8 rate_idx = 0;
+
+	rate_cfg &= RESPONSE_RATE_BITMAP_ALL;
+
+	val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+	val32 &= ~RESPONSE_RATE_BITMAP_ALL;
+	val32 |= rate_cfg;
+	rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
+
+	dev_dbg(&priv->udev->dev, "%s: rates %08x\n", __func__,	rate_cfg);
+
+	while (rate_cfg) {
+		rate_cfg = (rate_cfg >> 1);
+		rate_idx++;
+	}
+	rtl8xxxu_write8(priv, REG_INIRTS_RATE_SEL, rate_idx);
+}
+
+static void
+rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			  struct ieee80211_bss_conf *bss_conf, u32 changed)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct device *dev = &priv->udev->dev;
+	struct ieee80211_sta *sta;
+	u32 val32;
+	u8 val8;
+
+	if (changed & BSS_CHANGED_ASSOC) {
+		struct h2c_cmd h2c;
+
+		dev_dbg(dev, "Changed ASSOC: %i!\n", bss_conf->assoc);
+
+		memset(&h2c, 0, sizeof(struct h2c_cmd));
+		rtl8xxxu_set_linktype(priv, vif->type);
+
+		if (bss_conf->assoc) {
+			u32 ramask;
+			int sgi = 0;
+
+			rcu_read_lock();
+			sta = ieee80211_find_sta(vif, bss_conf->bssid);
+			if (!sta) {
+				dev_info(dev, "%s: ASSOC no sta found\n",
+					 __func__);
+				rcu_read_unlock();
+				goto error;
+			}
+
+			if (sta->ht_cap.ht_supported)
+				dev_info(dev, "%s: HT supported\n", __func__);
+			if (sta->vht_cap.vht_supported)
+				dev_info(dev, "%s: VHT supported\n", __func__);
+
+			/* TODO: Set bits 28-31 for rate adaptive id */
+			ramask = (sta->supp_rates[0] & 0xfff) |
+				sta->ht_cap.mcs.rx_mask[0] << 12 |
+				sta->ht_cap.mcs.rx_mask[1] << 20;
+			if (sta->ht_cap.cap &
+			    (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
+				sgi = 1;
+			rcu_read_unlock();
+
+			rtl8xxxu_update_rate_mask(priv, ramask, sgi);
+
+			val32 = rtl8xxxu_read32(priv, REG_RCR);
+			val32 |= RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON;
+			rtl8xxxu_write32(priv, REG_RCR, val32);
+
+			/* Enable RX of data frames */
+			rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff);
+
+			rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
+
+			rtl8723a_stop_tx_beacon(priv);
+
+			/* joinbss sequence */
+			rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
+					 0xc000 | bss_conf->aid);
+
+			h2c.joinbss.data = H2C_JOIN_BSS_CONNECT;
+		} else {
+			val32 = rtl8xxxu_read32(priv, REG_RCR);
+			val32 &= ~(RCR_CHECK_BSSID_MATCH |
+				   RCR_CHECK_BSSID_BEACON);
+			rtl8xxxu_write32(priv, REG_RCR, val32);
+
+			val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+			val8 |= BEACON_DISABLE_TSF_UPDATE;
+			rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+
+			/* Disable RX of data frames */
+			rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+			h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT;
+		}
+		h2c.joinbss.cmd = H2C_JOIN_BSS_REPORT;
+		rtl8723a_h2c_cmd(priv, &h2c);
+	}
+
+	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+		dev_dbg(dev, "Changed ERP_PREAMBLE: Use short preamble %i\n",
+			bss_conf->use_short_preamble);
+		val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+		if (bss_conf->use_short_preamble)
+			val32 |= RSR_ACK_SHORT_PREAMBLE;
+		else
+			val32 &= ~RSR_ACK_SHORT_PREAMBLE;
+		rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
+	}
+
+	if (changed & BSS_CHANGED_ERP_SLOT) {
+		dev_dbg(dev, "Changed ERP_SLOT: short_slot_time %i\n",
+			bss_conf->use_short_slot);
+
+		if (bss_conf->use_short_slot)
+			val8 = 9;
+		else
+			val8 = 20;
+		rtl8xxxu_write8(priv, REG_SLOT, val8);
+	}
+
+	if (changed & BSS_CHANGED_BSSID) {
+		dev_dbg(dev, "Changed BSSID!\n");
+		rtl8xxxu_set_bssid(priv, bss_conf->bssid);
+	}
+
+	if (changed & BSS_CHANGED_BASIC_RATES) {
+		dev_dbg(dev, "Changed BASIC_RATES!\n");
+		rtl8xxxu_set_basic_rates(priv, bss_conf->basic_rates);
+	}
+error:
+	return;
+}
+
+static u32 rtl8xxxu_80211_to_rtl_queue(u32 queue)
+{
+	u32 rtlqueue;
+
+	switch (queue) {
+	case IEEE80211_AC_VO:
+		rtlqueue = TXDESC_QUEUE_VO;
+		break;
+	case IEEE80211_AC_VI:
+		rtlqueue = TXDESC_QUEUE_VI;
+		break;
+	case IEEE80211_AC_BE:
+		rtlqueue = TXDESC_QUEUE_BE;
+		break;
+	case IEEE80211_AC_BK:
+		rtlqueue = TXDESC_QUEUE_BK;
+		break;
+	default:
+		rtlqueue = TXDESC_QUEUE_BE;
+	}
+
+	return rtlqueue;
+}
+
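+/*
+ * Management frames always go out on the dedicated MGNT queue; data
+ * frames use the mac80211 AC to hardware queue mapping above.
+ */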
+static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	u32 queue;
+
+	if (ieee80211_is_mgmt(hdr->frame_control))
+		queue = TXDESC_QUEUE_MGNT;
+	else
+		queue = rtl8xxxu_80211_to_rtl_queue(skb_get_queue_mapping(skb));
+
+	return queue;
+}
+
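+/*
+ * The TX descriptor checksum is simply the XOR of all 16-bit words in
+ * the descriptor, computed with the csum field itself zeroed out.
+ */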
+static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_tx_desc *tx_desc)
+{
+	__le16 *ptr = (__le16 *)tx_desc;
+	u16 csum = 0;
+	int i;
+
+	/*
+	 * Clear csum field before calculation, as the csum field is
+	 * in the middle of the struct.
+	 */
+	tx_desc->csum = cpu_to_le16(0);
+
+	for (i = 0; i < (sizeof(struct rtl8xxxu_tx_desc) / sizeof(u16)); i++)
+		csum = csum ^ le16_to_cpu(ptr[i]);
+
+	tx_desc->csum |= cpu_to_le16(csum);
+}
+
+static void rtl8xxxu_free_tx_resources(struct rtl8xxxu_priv *priv)
+{
+	struct rtl8xxxu_tx_urb *tx_urb, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->tx_urb_lock, flags);
+	list_for_each_entry_safe(tx_urb, tmp, &priv->tx_urb_free_list, list) {
+		list_del(&tx_urb->list);
+		priv->tx_urb_free_count--;
+		usb_free_urb(&tx_urb->urb);
+	}
+	spin_unlock_irqrestore(&priv->tx_urb_lock, flags);
+}
+
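+/*
+ * TX flow control: stop the mac80211 queues when the pool of free TX urbs
+ * drops below the low water mark, and wake them again in
+ * rtl8xxxu_free_tx_urb() once enough urbs have been returned.
+ */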
+static struct rtl8xxxu_tx_urb *
+rtl8xxxu_alloc_tx_urb(struct rtl8xxxu_priv *priv)
+{
+	struct rtl8xxxu_tx_urb *tx_urb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->tx_urb_lock, flags);
+	tx_urb = list_first_entry_or_null(&priv->tx_urb_free_list,
+					  struct rtl8xxxu_tx_urb, list);
+	if (tx_urb) {
+		list_del(&tx_urb->list);
+		priv->tx_urb_free_count--;
+		if (priv->tx_urb_free_count < RTL8XXXU_TX_URB_LOW_WATER &&
+		    !priv->tx_stopped) {
+			priv->tx_stopped = true;
+			ieee80211_stop_queues(priv->hw);
+		}
+	}
+
+	spin_unlock_irqrestore(&priv->tx_urb_lock, flags);
+
+	return tx_urb;
+}
+
+static void rtl8xxxu_free_tx_urb(struct rtl8xxxu_priv *priv,
+				 struct rtl8xxxu_tx_urb *tx_urb)
+{
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&tx_urb->list);
+
+	spin_lock_irqsave(&priv->tx_urb_lock, flags);
+
+	list_add(&tx_urb->list, &priv->tx_urb_free_list);
+	priv->tx_urb_free_count++;
+	if (priv->tx_urb_free_count > RTL8XXXU_TX_URB_HIGH_WATER &&
+	    priv->tx_stopped) {
+		priv->tx_stopped = false;
+		ieee80211_wake_queues(priv->hw);
+	}
+
+	spin_unlock_irqrestore(&priv->tx_urb_lock, flags);
+}
+
+static void rtl8xxxu_tx_complete(struct urb *urb)
+{
+	struct sk_buff *skb = (struct sk_buff *)urb->context;
+	struct ieee80211_tx_info *tx_info;
+	struct ieee80211_hw *hw;
+	struct rtl8xxxu_tx_urb *tx_urb =
+		container_of(urb, struct rtl8xxxu_tx_urb, urb);
+
+	tx_info = IEEE80211_SKB_CB(skb);
+	hw = tx_info->rate_driver_data[0];
+
+	skb_pull(skb, sizeof(struct rtl8xxxu_tx_desc));
+
+	ieee80211_tx_info_clear_status(tx_info);
+	tx_info->status.rates[0].idx = -1;
+	tx_info->status.rates[0].count = 0;
+
+	if (!urb->status)
+		tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+	ieee80211_tx_status_irqsafe(hw, skb);
+
+	rtl8xxxu_free_tx_urb(hw->priv, tx_urb);
+}
+
+static void rtl8xxxu_dump_action(struct device *dev,
+				 struct ieee80211_hdr *hdr)
+{
+	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
+	u16 cap, timeout;
+
+	if (!(rtl8xxxu_debug & RTL8XXXU_DEBUG_ACTION))
+		return;
+
+	switch (mgmt->u.action.u.addba_resp.action_code) {
+	case WLAN_ACTION_ADDBA_RESP:
+		cap = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
+		timeout = le16_to_cpu(mgmt->u.action.u.addba_resp.timeout);
+		dev_info(dev, "WLAN_ACTION_ADDBA_RESP: "
+			 "timeout %i, tid %02x, buf_size %02x, policy %02x, "
+			 "status %02x\n",
+			 timeout,
+			 (cap & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2,
+			 (cap & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6,
+			 (cap >> 1) & 0x1,
+			 le16_to_cpu(mgmt->u.action.u.addba_resp.status));
+		break;
+	case WLAN_ACTION_ADDBA_REQ:
+		cap = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+		timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
+		dev_info(dev, "WLAN_ACTION_ADDBA_REQ: "
+			 "timeout %i, tid %02x, buf_size %02x, policy %02x\n",
+			 timeout,
+			 (cap & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2,
+			 (cap & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6,
+			 (cap >> 1) & 0x1);
+		break;
+	default:
+		dev_info(dev, "action frame %02x\n",
+			 mgmt->u.action.u.addba_resp.action_code);
+		break;
+	}
+}
+
+static void rtl8xxxu_tx(struct ieee80211_hw *hw,
+			struct ieee80211_tx_control *control,
+			struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct rtl8xxxu_tx_desc *tx_desc;
+	struct rtl8xxxu_tx_urb *tx_urb;
+	struct ieee80211_sta *sta = NULL;
+	struct ieee80211_vif *vif = tx_info->control.vif;
+	struct device *dev = &priv->udev->dev;
+	u32 queue, rate;
+	u16 pktlen = skb->len;
+	u16 seq_number;
+	u16 rate_flag = tx_info->control.rates[0].flags;
+	int ret;
+
+	if (skb_headroom(skb) < sizeof(struct rtl8xxxu_tx_desc)) {
+		dev_warn(dev,
+			 "%s: Not enough headroom (%i) for tx descriptor\n",
+			 __func__, skb_headroom(skb));
+		goto error;
+	}
+
+	if (unlikely(skb->len > (65535 - sizeof(struct rtl8xxxu_tx_desc)))) {
+		dev_warn(dev, "%s: Trying to send over-sized skb (%i)\n",
+			 __func__, skb->len);
+		goto error;
+	}
+
+	tx_urb = rtl8xxxu_alloc_tx_urb(priv);
+	if (!tx_urb) {
+		dev_warn(dev, "%s: Unable to allocate tx urb\n", __func__);
+		goto error;
+	}
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
+		dev_info(dev, "%s: TX rate: %d (%d), pkt size %d\n",
+			 __func__, tx_rate->bitrate, tx_rate->hw_value, pktlen);
+
+	if (ieee80211_is_action(hdr->frame_control))
+		rtl8xxxu_dump_action(dev, hdr);
+
+	tx_info->rate_driver_data[0] = hw;
+
+	if (control && control->sta)
+		sta = control->sta;
+
+	tx_desc = (struct rtl8xxxu_tx_desc *)
+		skb_push(skb, sizeof(struct rtl8xxxu_tx_desc));
+
+	memset(tx_desc, 0, sizeof(struct rtl8xxxu_tx_desc));
+	tx_desc->pkt_size = cpu_to_le16(pktlen);
+	tx_desc->pkt_offset = sizeof(struct rtl8xxxu_tx_desc);
+
+	tx_desc->txdw0 =
+		TXDESC_OWN | TXDESC_FIRST_SEGMENT | TXDESC_LAST_SEGMENT;
+	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+	    is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+		tx_desc->txdw0 |= TXDESC_BROADMULTICAST;
+
+	queue = rtl8xxxu_queue_select(hw, skb);
+	tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
+
+	if (tx_info->control.hw_key) {
+		switch (tx_info->control.hw_key->cipher) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+		case WLAN_CIPHER_SUITE_TKIP:
+			tx_desc->txdw1 |= cpu_to_le32(TXDESC_SEC_RC4);
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			tx_desc->txdw1 |= cpu_to_le32(TXDESC_SEC_AES);
+			break;
+		default:
+			break;
+		}
+	}
+
+	seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+	tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT);
+
+	if (rate_flag & IEEE80211_TX_RC_MCS)
+		rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
+	else
+		rate = tx_rate->hw_value;
+	tx_desc->txdw5 = cpu_to_le32(rate);
+
+	if (ieee80211_is_data(hdr->frame_control))
+		tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
+
+	/* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */
+	if (ieee80211_is_data_qos(hdr->frame_control) && sta) {
+		if (sta->ht_cap.ht_supported) {
+			u32 ampdu, val32;
+
+			ampdu = (u32)sta->ht_cap.ampdu_density;
+			val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT;
+			tx_desc->txdw2 |= cpu_to_le32(val32);
+			tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE);
+		} else
+			tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK);
+	} else
+		tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK);
+
+	if (ieee80211_is_data_qos(hdr->frame_control))
+		tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS);
+	if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
+	    (sta && vif && vif->bss_conf.use_short_preamble))
+		tx_desc->txdw4 |= cpu_to_le32(TXDESC_SHORT_PREAMBLE);
+	if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
+	    (ieee80211_is_data_qos(hdr->frame_control) &&
+	     sta && sta->ht_cap.cap &
+	     (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
+		tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI);
+	}
+	if (ieee80211_is_mgmt(hdr->frame_control)) {
+		tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
+		tx_desc->txdw4 |= cpu_to_le32(TXDESC_USE_DRIVER_RATE);
+		tx_desc->txdw5 |= cpu_to_le32(6 << TXDESC_RETRY_LIMIT_SHIFT);
+		tx_desc->txdw5 |= cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE);
+	}
+
+	if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+		/* Use RTS rate 24M - does mac80211 tell us which one to use? */
+		tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M);
+		tx_desc->txdw4 |= cpu_to_le32(TXDESC_RTS_CTS_ENABLE);
+		tx_desc->txdw4 |= cpu_to_le32(TXDESC_HW_RTS_ENABLE);
+	}
+
+	rtl8xxxu_calc_tx_desc_csum(tx_desc);
+
+	usb_fill_bulk_urb(&tx_urb->urb, priv->udev, priv->pipe_out[queue],
+			  skb->data, skb->len, rtl8xxxu_tx_complete, skb);
+
+	usb_anchor_urb(&tx_urb->urb, &priv->tx_anchor);
+	ret = usb_submit_urb(&tx_urb->urb, GFP_ATOMIC);
+	if (ret) {
+		usb_unanchor_urb(&tx_urb->urb);
+		rtl8xxxu_free_tx_urb(priv, tx_urb);
+		goto error;
+	}
+	return;
+error:
+	dev_kfree_skb(skb);
+}
+
+static void rtl8xxxu_rx_parse_phystats(struct rtl8xxxu_priv *priv,
+				       struct ieee80211_rx_status *rx_status,
+				       struct rtl8xxxu_rx_desc *rx_desc,
+				       struct rtl8723au_phy_stats *phy_stats)
+{
+	if (phy_stats->sgi_en)
+		rx_status->flag |= RX_FLAG_SHORT_GI;
+
+	if (rx_desc->rxmcs < DESC_RATE_6M) {
+		/*
+		 * Handle PHY stats for CCK rates
+		 */
+		u8 cck_agc_rpt = phy_stats->cck_agc_rpt_ofdm_cfosho_a;
+
+		switch (cck_agc_rpt & 0xc0) {
+		case 0xc0:
+			rx_status->signal = -46 - (cck_agc_rpt & 0x3e);
+			break;
+		case 0x80:
+			rx_status->signal = -26 - (cck_agc_rpt & 0x3e);
+			break;
+		case 0x40:
+			rx_status->signal = -12 - (cck_agc_rpt & 0x3e);
+			break;
+		case 0x00:
+			rx_status->signal = 16 - (cck_agc_rpt & 0x3e);
+			break;
+		}
+	} else {
+		rx_status->signal =
+			(phy_stats->cck_sig_qual_ofdm_pwdb_all >> 1) - 110;
+	}
+}
+
+static void rtl8xxxu_free_rx_resources(struct rtl8xxxu_priv *priv)
+{
+	struct rtl8xxxu_rx_urb *rx_urb, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->rx_urb_lock, flags);
+
+	list_for_each_entry_safe(rx_urb, tmp,
+				 &priv->rx_urb_pending_list, list) {
+		list_del(&rx_urb->list);
+		priv->rx_urb_pending_count--;
+		usb_free_urb(&rx_urb->urb);
+	}
+
+	spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+}
+
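+/*
+ * Completed (or failed-to-resubmit) RX urbs are parked on a pending list;
+ * once the backlog exceeds the water mark, rtl8xxxu_rx_urb_work() is
+ * scheduled to resubmit them from process context.
+ */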
+static void rtl8xxxu_queue_rx_urb(struct rtl8xxxu_priv *priv,
+				  struct rtl8xxxu_rx_urb *rx_urb)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+	int pending = 0;
+
+	spin_lock_irqsave(&priv->rx_urb_lock, flags);
+
+	if (!priv->shutdown) {
+		list_add_tail(&rx_urb->list, &priv->rx_urb_pending_list);
+		priv->rx_urb_pending_count++;
+		pending = priv->rx_urb_pending_count;
+	} else {
+		skb = (struct sk_buff *)rx_urb->urb.context;
+		dev_kfree_skb(skb);
+		usb_free_urb(&rx_urb->urb);
+	}
+
+	spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+	if (pending > RTL8XXXU_RX_URB_PENDING_WATER)
+		schedule_work(&priv->rx_urb_wq);
+}
+
+static void rtl8xxxu_rx_urb_work(struct work_struct *work)
+{
+	struct rtl8xxxu_priv *priv;
+	struct rtl8xxxu_rx_urb *rx_urb, *tmp;
+	struct list_head local;
+	struct sk_buff *skb;
+	unsigned long flags;
+	int ret;
+
+	priv = container_of(work, struct rtl8xxxu_priv, rx_urb_wq);
+	INIT_LIST_HEAD(&local);
+
+	spin_lock_irqsave(&priv->rx_urb_lock, flags);
+
+	list_splice_init(&priv->rx_urb_pending_list, &local);
+	priv->rx_urb_pending_count = 0;
+
+	spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+	list_for_each_entry_safe(rx_urb, tmp, &local, list) {
+		list_del_init(&rx_urb->list);
+		ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
+		/*
+		 * If out of memory or temporary error, put it back on the
+		 * queue and try again. Otherwise the device is dead/gone
+		 * and we should drop it.
+		 */
+		switch (ret) {
+		case 0:
+			break;
+		case -ENOMEM:
+		case -EAGAIN:
+			rtl8xxxu_queue_rx_urb(priv, rx_urb);
+			break;
+		default:
+			pr_info("failed to requeue urb %i\n", ret);
+			skb = (struct sk_buff *)rx_urb->urb.context;
+			dev_kfree_skb(skb);
+			usb_free_urb(&rx_urb->urb);
+		}
+	}
+}
+
+static void rtl8xxxu_rx_complete(struct urb *urb)
+{
+	struct rtl8xxxu_rx_urb *rx_urb =
+		container_of(urb, struct rtl8xxxu_rx_urb, urb);
+	struct ieee80211_hw *hw = rx_urb->hw;
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct sk_buff *skb = (struct sk_buff *)urb->context;
+	struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data;
+	struct rtl8723au_phy_stats *phy_stats;
+	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+	struct ieee80211_mgmt *mgmt;
+	struct device *dev = &priv->udev->dev;
+	__le32 *_rx_desc_le = (__le32 *)skb->data;
+	u32 *_rx_desc = (u32 *)skb->data;
+	int cnt, len, drvinfo_sz, desc_shift, i;
+
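+	/* Convert the RX descriptor to host endianness in place */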
+	for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++)
+		_rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+
+	cnt = rx_desc->frag;
+	len = rx_desc->pktlen;
+	drvinfo_sz = rx_desc->drvinfo_sz * 8;
+	desc_shift = rx_desc->shift;
+	skb_put(skb, urb->actual_length);
+
+	if (urb->status == 0) {
+		skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc));
+		phy_stats = (struct rtl8723au_phy_stats *)skb->data;
+
+		skb_pull(skb, drvinfo_sz + desc_shift);
+
+		mgmt = (struct ieee80211_mgmt *)skb->data;
+
+		memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+		if (rx_desc->phy_stats)
+			rtl8xxxu_rx_parse_phystats(priv, rx_status,
+						   rx_desc, phy_stats);
+
+		rx_status->freq = hw->conf.chandef.chan->center_freq;
+		rx_status->band = hw->conf.chandef.chan->band;
+
+		rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+		rx_status->flag |= RX_FLAG_MACTIME_START;
+
+		if (!rx_desc->swdec)
+			rx_status->flag |= RX_FLAG_DECRYPTED;
+		if (rx_desc->crc32)
+			rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+		if (rx_desc->bw)
+			rx_status->flag |= RX_FLAG_40MHZ;
+
+		if (rx_desc->rxht) {
+			rx_status->flag |= RX_FLAG_HT;
+			rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
+		} else {
+			rx_status->rate_idx = rx_desc->rxmcs;
+		}
+
+		ieee80211_rx_irqsafe(hw, skb);
+		skb = NULL;
+		rx_urb->urb.context = NULL;
+		rtl8xxxu_queue_rx_urb(priv, rx_urb);
+	} else {
+		dev_dbg(dev, "%s: status %i\n",	__func__, urb->status);
+		goto cleanup;
+	}
+	return;
+
+cleanup:
+	usb_free_urb(urb);
+	dev_kfree_skb(skb);
+	return;
+}
+
+static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
+				  struct rtl8xxxu_rx_urb *rx_urb)
+{
+	struct sk_buff *skb;
+	int skb_size;
+	int ret;
+
+	skb_size = sizeof(struct rtl8xxxu_rx_desc) + RTL_RX_BUFFER_SIZE;
+	skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	memset(skb->data, 0, sizeof(struct rtl8xxxu_rx_desc));
+	usb_fill_bulk_urb(&rx_urb->urb, priv->udev, priv->pipe_in, skb->data,
+			  skb_size, rtl8xxxu_rx_complete, skb);
+	usb_anchor_urb(&rx_urb->urb, &priv->rx_anchor);
+	ret = usb_submit_urb(&rx_urb->urb, GFP_ATOMIC);
+	if (ret)
+		usb_unanchor_urb(&rx_urb->urb);
+	return ret;
+}
+
+static void rtl8xxxu_int_complete(struct urb *urb)
+{
+	struct rtl8xxxu_priv *priv = (struct rtl8xxxu_priv *)urb->context;
+	struct device *dev = &priv->udev->dev;
+	int ret;
+
+	dev_dbg(dev, "%s: status %i\n", __func__, urb->status);
+	if (urb->status == 0) {
+		usb_anchor_urb(urb, &priv->int_anchor);
+		ret = usb_submit_urb(urb, GFP_ATOMIC);
+		if (ret)
+			usb_unanchor_urb(urb);
+	} else {
+		dev_info(dev, "%s: Error %i\n", __func__, urb->status);
+	}
+}
+
+static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct urb *urb;
+	u32 val32;
+	int ret;
+
+	urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!urb)
+		return -ENOMEM;
+
+	usb_fill_int_urb(urb, priv->udev, priv->pipe_interrupt,
+			 priv->int_buf, USB_INTR_CONTENT_LENGTH,
+			 rtl8xxxu_int_complete, priv, 1);
+	usb_anchor_urb(urb, &priv->int_anchor);
+	ret = usb_submit_urb(urb, GFP_KERNEL);
+	if (ret) {
+		usb_unanchor_urb(urb);
+		goto error;
+	}
+
+	val32 = rtl8xxxu_read32(priv, REG_USB_HIMR);
+	val32 |= USB_HIMR_CPWM;
+	rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
+
+error:
+	return ret;
+}
+
+static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	int ret;
+	u8 val8;
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		rtl8723a_stop_tx_beacon(priv);
+
+		val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+		val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
+			BEACON_DISABLE_TSF_UPDATE;
+		rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+		ret = 0;
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	rtl8xxxu_set_linktype(priv, vif->type);
+
+	return ret;
+}
+
+static void rtl8xxxu_remove_interface(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+
+	dev_dbg(&priv->udev->dev, "%s\n", __func__);
+}
+
+static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct device *dev = &priv->udev->dev;
+	u16 val16;
+	int ret = 0, channel;
+	bool ht40;
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_CHANNEL)
+		dev_info(dev,
+			 "%s: channel: %i (changed %08x chandef.width %02x)\n",
+			 __func__, hw->conf.chandef.chan->hw_value,
+			 changed, hw->conf.chandef.width);
+
+	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+		val16 = ((hw->conf.long_frame_max_tx_count <<
+			  RETRY_LIMIT_LONG_SHIFT) & RETRY_LIMIT_LONG_MASK) |
+			((hw->conf.short_frame_max_tx_count <<
+			  RETRY_LIMIT_SHORT_SHIFT) & RETRY_LIMIT_SHORT_MASK);
+		rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		switch (hw->conf.chandef.width) {
+		case NL80211_CHAN_WIDTH_20_NOHT:
+		case NL80211_CHAN_WIDTH_20:
+			ht40 = false;
+			break;
+		case NL80211_CHAN_WIDTH_40:
+			ht40 = true;
+			break;
+		default:
+			ret = -ENOTSUPP;
+			goto exit;
+		}
+
+		channel = hw->conf.chandef.chan->hw_value;
+
+		rtl8723a_set_tx_power(priv, channel, ht40);
+
+		rtl8723au_config_channel(hw);
+	}
+
+exit:
+	return ret;
+}
+
+static int rtl8xxxu_conf_tx(struct ieee80211_hw *hw,
+			    struct ieee80211_vif *vif, u16 queue,
+			    const struct ieee80211_tx_queue_params *param)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct device *dev = &priv->udev->dev;
+	u32 val32;
+	u8 aifs, acm_ctrl, acm_bit;
+
+	aifs = param->aifs;
+
+	val32 = aifs |
+		fls(param->cw_min) << EDCA_PARAM_ECW_MIN_SHIFT |
+		fls(param->cw_max) << EDCA_PARAM_ECW_MAX_SHIFT |
+		(u32)param->txop << EDCA_PARAM_TXOP_SHIFT;
+
+	acm_ctrl = rtl8xxxu_read8(priv, REG_ACM_HW_CTRL);
+	dev_dbg(dev,
+		"%s: IEEE80211 queue %02x val %08x, acm %i, acm_ctrl %02x\n",
+		__func__, queue, val32, param->acm, acm_ctrl);
+
+	switch (queue) {
+	case IEEE80211_AC_VO:
+		acm_bit = ACM_HW_CTRL_VO;
+		rtl8xxxu_write32(priv, REG_EDCA_VO_PARAM, val32);
+		break;
+	case IEEE80211_AC_VI:
+		acm_bit = ACM_HW_CTRL_VI;
+		rtl8xxxu_write32(priv, REG_EDCA_VI_PARAM, val32);
+		break;
+	case IEEE80211_AC_BE:
+		acm_bit = ACM_HW_CTRL_BE;
+		rtl8xxxu_write32(priv, REG_EDCA_BE_PARAM, val32);
+		break;
+	case IEEE80211_AC_BK:
+		acm_bit = ACM_HW_CTRL_BK;
+		rtl8xxxu_write32(priv, REG_EDCA_BK_PARAM, val32);
+		break;
+	default:
+		acm_bit = 0;
+		break;
+	}
+
+	if (param->acm)
+		acm_ctrl |= acm_bit;
+	else
+		acm_ctrl &= ~acm_bit;
+	rtl8xxxu_write8(priv, REG_ACM_HW_CTRL, acm_ctrl);
+
+	return 0;
+}
+
+static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
+				      unsigned int changed_flags,
+				      unsigned int *total_flags, u64 multicast)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+
+	dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n",
+		__func__, changed_flags, *total_flags);
+
+	*total_flags &= (FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC);
+}
+
+static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts)
+{
+	if (rts > 2347)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+			    struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta,
+			    struct ieee80211_key_conf *key)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct device *dev = &priv->udev->dev;
+	u8 mac_addr[ETH_ALEN];
+	u8 val8;
+	u16 val16;
+	u32 val32;
+	int retval = -EOPNOTSUPP;
+
+	dev_dbg(dev, "%s: cmd %02x, cipher %08x, index %i\n",
+		__func__, cmd, key->cipher, key->keyidx);
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return -EOPNOTSUPP;
+
+	if (key->keyidx > 3)
+		return -EOPNOTSUPP;
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
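+		/* No break - TKIP falls through and is rejected with -EOPNOTSUPP */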
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+		dev_dbg(dev, "%s: pairwise key\n", __func__);
+		ether_addr_copy(mac_addr, sta->addr);
+	} else {
+		dev_dbg(dev, "%s: group key\n", __func__);
+		eth_broadcast_addr(mac_addr);
+	}
+
+	val16 = rtl8xxxu_read16(priv, REG_CR);
+	val16 |= CR_SECURITY_ENABLE;
+	rtl8xxxu_write16(priv, REG_CR, val16);
+
+	val8 = SEC_CFG_TX_SEC_ENABLE | SEC_CFG_TXBC_USE_DEFKEY |
+		SEC_CFG_RX_SEC_ENABLE | SEC_CFG_RXBC_USE_DEFKEY;
+	val8 |= SEC_CFG_TX_USE_DEFKEY | SEC_CFG_RX_USE_DEFKEY;
+	rtl8xxxu_write8(priv, REG_SECURITY_CFG, val8);
+
+	switch (cmd) {
+	case SET_KEY:
+		key->hw_key_idx = key->keyidx;
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+		rtl8xxxu_cam_write(priv, key, mac_addr);
+		retval = 0;
+		break;
+	case DISABLE_KEY:
+		rtl8xxxu_write32(priv, REG_CAM_WRITE, 0x00000000);
+		val32 = CAM_CMD_POLLING | CAM_CMD_WRITE |
+			key->keyidx << CAM_CMD_KEY_SHIFT;
+		rtl8xxxu_write32(priv, REG_CAM_CMD, val32);
+		retval = 0;
+		break;
+	default:
+		dev_warn(dev, "%s: Unsupported command %02x\n", __func__, cmd);
+	}
+
+	return retval;
+}
+
+static int
+rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		      enum ieee80211_ampdu_mlme_action action,
+		      struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
+		      bool amsdu)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct device *dev = &priv->udev->dev;
+	u8 ampdu_factor, ampdu_density;
+
+	switch (action) {
+	case IEEE80211_AMPDU_TX_START:
+		dev_info(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__);
+		ampdu_factor = sta->ht_cap.ampdu_factor;
+		ampdu_density = sta->ht_cap.ampdu_density;
+		rtl8xxxu_set_ampdu_factor(priv, ampdu_factor);
+		rtl8xxxu_set_ampdu_min_space(priv, ampdu_density);
+		dev_dbg(dev,
+			"Changed HT: ampdu_factor %02x, ampdu_density %02x\n",
+			ampdu_factor, ampdu_density);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+		dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__);
+		rtl8xxxu_set_ampdu_factor(priv, 0);
+		rtl8xxxu_set_ampdu_min_space(priv, 0);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n",
+			 __func__);
+		rtl8xxxu_set_ampdu_factor(priv, 0);
+		rtl8xxxu_set_ampdu_min_space(priv, 0);
+		break;
+	case IEEE80211_AMPDU_RX_START:
+		dev_info(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		dev_info(dev, "%s: IEEE80211_AMPDU_RX_STOP\n", __func__);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int rtl8xxxu_start(struct ieee80211_hw *hw)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct rtl8xxxu_rx_urb *rx_urb;
+	struct rtl8xxxu_tx_urb *tx_urb;
+	unsigned long flags;
+	int ret, i;
+
+	ret = 0;
+
+	init_usb_anchor(&priv->rx_anchor);
+	init_usb_anchor(&priv->tx_anchor);
+	init_usb_anchor(&priv->int_anchor);
+
+	rtl8723a_enable_rf(priv);
+	ret = rtl8xxxu_submit_int_urb(hw);
+	if (ret)
+		goto exit;
+
+	for (i = 0; i < RTL8XXXU_TX_URBS; i++) {
+		tx_urb = kmalloc(sizeof(struct rtl8xxxu_tx_urb), GFP_KERNEL);
+		if (!tx_urb) {
+			if (!i)
+				ret = -ENOMEM;
+
+			goto error_out;
+		}
+		usb_init_urb(&tx_urb->urb);
+		INIT_LIST_HEAD(&tx_urb->list);
+		tx_urb->hw = hw;
+		list_add(&tx_urb->list, &priv->tx_urb_free_list);
+		priv->tx_urb_free_count++;
+	}
+
+	priv->tx_stopped = false;
+
+	spin_lock_irqsave(&priv->rx_urb_lock, flags);
+	priv->shutdown = false;
+	spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+	for (i = 0; i < RTL8XXXU_RX_URBS; i++) {
+		rx_urb = kmalloc(sizeof(struct rtl8xxxu_rx_urb), GFP_KERNEL);
+		if (!rx_urb) {
+			if (!i)
+				ret = -ENOMEM;
+
+			goto error_out;
+		}
+		usb_init_urb(&rx_urb->urb);
+		INIT_LIST_HEAD(&rx_urb->list);
+		rx_urb->hw = hw;
+
+		ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
+	}
+exit:
+	/*
+	 * Disable all data frames
+	 */
+	rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+	/*
+	 * Accept all mgmt frames
+	 */
+	rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0xffff);
+
+	rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6954341e);
+
+	return ret;
+
+error_out:
+	rtl8xxxu_free_tx_resources(priv);
+	/*
+	 * Disable all data and mgmt frames
+	 */
+	rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+	rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0x0000);
+
+	return ret;
+}
+
+static void rtl8xxxu_stop(struct ieee80211_hw *hw)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	unsigned long flags;
+
+	rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+	rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0x0000);
+	rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+
+	spin_lock_irqsave(&priv->rx_urb_lock, flags);
+	priv->shutdown = true;
+	spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+	usb_kill_anchored_urbs(&priv->rx_anchor);
+	usb_kill_anchored_urbs(&priv->tx_anchor);
+	usb_kill_anchored_urbs(&priv->int_anchor);
+
+	rtl8723a_disable_rf(priv);
+
+	/*
+	 * Disable interrupts
+	 */
+	rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+
+	rtl8xxxu_free_rx_resources(priv);
+	rtl8xxxu_free_tx_resources(priv);
+}
+
+static const struct ieee80211_ops rtl8xxxu_ops = {
+	.tx = rtl8xxxu_tx,
+	.add_interface = rtl8xxxu_add_interface,
+	.remove_interface = rtl8xxxu_remove_interface,
+	.config = rtl8xxxu_config,
+	.conf_tx = rtl8xxxu_conf_tx,
+	.bss_info_changed = rtl8xxxu_bss_info_changed,
+	.configure_filter = rtl8xxxu_configure_filter,
+	.set_rts_threshold = rtl8xxxu_set_rts_threshold,
+	.start = rtl8xxxu_start,
+	.stop = rtl8xxxu_stop,
+	.sw_scan_start = rtl8xxxu_sw_scan_start,
+	.sw_scan_complete = rtl8xxxu_sw_scan_complete,
+	.set_key = rtl8xxxu_set_key,
+	.ampdu_action = rtl8xxxu_ampdu_action,
+};
+
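+/*
+ * Walk the interface's endpoint descriptors and record the bulk-in and
+ * interrupt-in pipes and the bulk-out endpoint numbers the device exposes.
+ */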
+static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv,
+			      struct usb_interface *interface)
+{
+	struct usb_interface_descriptor *interface_desc;
+	struct usb_host_interface *host_interface;
+	struct usb_endpoint_descriptor *endpoint;
+	struct device *dev = &priv->udev->dev;
+	int i, j = 0, endpoints;
+	u8 dir, xtype, num;
+	int ret = 0;
+
+	host_interface = &interface->altsetting[0];
+	interface_desc = &host_interface->desc;
+	endpoints = interface_desc->bNumEndpoints;
+
+	for (i = 0; i < endpoints; i++) {
+		endpoint = &host_interface->endpoint[i].desc;
+
+		dir = endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
+		num = usb_endpoint_num(endpoint);
+		xtype = usb_endpoint_type(endpoint);
+		if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+			dev_dbg(dev,
+				"%s: endpoint: dir %02x, # %02x, type %02x\n",
+				__func__, dir, num, xtype);
+		if (usb_endpoint_dir_in(endpoint) &&
+		    usb_endpoint_xfer_bulk(endpoint)) {
+			if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+				dev_dbg(dev, "%s: in endpoint num %i\n",
+					__func__, num);
+
+			if (priv->pipe_in) {
+				dev_warn(dev,
+					 "%s: Too many IN pipes\n", __func__);
+				ret = -EINVAL;
+				goto exit;
+			}
+
+			priv->pipe_in =	usb_rcvbulkpipe(priv->udev, num);
+		}
+
+		if (usb_endpoint_dir_in(endpoint) &&
+		    usb_endpoint_xfer_int(endpoint)) {
+			if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+				dev_dbg(dev, "%s: interrupt endpoint num %i\n",
+					__func__, num);
+
+			if (priv->pipe_interrupt) {
+				dev_warn(dev, "%s: Too many INTERRUPT pipes\n",
+					 __func__);
+				ret = -EINVAL;
+				goto exit;
+			}
+
+			priv->pipe_interrupt = usb_rcvintpipe(priv->udev, num);
+		}
+
+		if (usb_endpoint_dir_out(endpoint) &&
+		    usb_endpoint_xfer_bulk(endpoint)) {
+			if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+				dev_dbg(dev, "%s: out endpoint num %i\n",
+					__func__, num);
+			if (j >= RTL8XXXU_OUT_ENDPOINTS) {
+				dev_warn(dev,
+					 "%s: Too many OUT pipes\n", __func__);
+				ret = -EINVAL;
+				goto exit;
+			}
+			priv->out_ep[j++] = num;
+		}
+	}
+exit:
+	priv->nr_out_eps = j;
+	return ret;
+}
+
+static int rtl8xxxu_probe(struct usb_interface *interface,
+			  const struct usb_device_id *id)
+{
+	struct rtl8xxxu_priv *priv;
+	struct ieee80211_hw *hw;
+	struct usb_device *udev;
+	struct ieee80211_supported_band *sband;
+	int ret = 0;
+	int untested = 1;
+
+	udev = usb_get_dev(interface_to_usbdev(interface));
+
+	switch (id->idVendor) {
+	case USB_VENDOR_ID_REALTEK:
+		switch (id->idProduct) {
+		case 0x1724:
+		case 0x8176:
+		case 0x8178:
+		case 0x817f:
+			untested = 0;
+			break;
+		}
+		break;
+	case 0x7392:
+		if (id->idProduct == 0x7811)
+			untested = 0;
+		break;
+	default:
+		break;
+	}
+
+	if (untested) {
+		rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE;
+		dev_info(&udev->dev,
+			 "This Realtek USB WiFi dongle (0x%04x:0x%04x) is untested!\n",
+			 id->idVendor, id->idProduct);
+		dev_info(&udev->dev,
+			 "Please report results to Jes.Sorensen@gmail.com\n");
+	}
+
+	hw = ieee80211_alloc_hw(sizeof(struct rtl8xxxu_priv), &rtl8xxxu_ops);
+	if (!hw) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	priv = hw->priv;
+	priv->hw = hw;
+	priv->udev = udev;
+	priv->fops = (struct rtl8xxxu_fileops *)id->driver_info;
+	mutex_init(&priv->usb_buf_mutex);
+	mutex_init(&priv->h2c_mutex);
+	INIT_LIST_HEAD(&priv->tx_urb_free_list);
+	spin_lock_init(&priv->tx_urb_lock);
+	INIT_LIST_HEAD(&priv->rx_urb_pending_list);
+	spin_lock_init(&priv->rx_urb_lock);
+	INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
+
+	usb_set_intfdata(interface, hw);
+
+	ret = rtl8xxxu_parse_usb(priv, interface);
+	if (ret)
+		goto exit;
+
+	ret = rtl8xxxu_identify_chip(priv);
+	if (ret) {
+		dev_err(&udev->dev, "Fatal - failed to identify chip\n");
+		goto exit;
+	}
+
+	ret = rtl8xxxu_read_efuse(priv);
+	if (ret) {
+		dev_err(&udev->dev, "Fatal - failed to read EFuse\n");
+		goto exit;
+	}
+
+	ret = priv->fops->parse_efuse(priv);
+	if (ret) {
+		dev_err(&udev->dev, "Fatal - failed to parse EFuse\n");
+		goto exit;
+	}
+
+	rtl8xxxu_print_chipinfo(priv);
+
+	ret = priv->fops->load_firmware(priv);
+	if (ret) {
+		dev_err(&udev->dev, "Fatal - failed to load firmware\n");
+		goto exit;
+	}
+
+	ret = rtl8xxxu_init_device(hw);
+
+	hw->wiphy->max_scan_ssids = 1;
+	hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+	hw->queues = 4;
+
+	sband = &rtl8xxxu_supported_band;
+	sband->ht_cap.ht_supported = true;
+	sband->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+	sband->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+	sband->ht_cap.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40;
+	memset(&sband->ht_cap.mcs, 0, sizeof(sband->ht_cap.mcs));
+	sband->ht_cap.mcs.rx_mask[0] = 0xff;
+	sband->ht_cap.mcs.rx_mask[4] = 0x01;
+	if (priv->rf_paths > 1) {
+		sband->ht_cap.mcs.rx_mask[1] = 0xff;
+		sband->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+	}
+	sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+	/*
+	 * Some APs will negotiate HT20_40 in a noisy environment leading
+	 * to miserable performance. Rather than defaulting to this, only
+	 * enable it if explicitly requested at module load time.
+	 */
+	if (rtl8xxxu_ht40_2g) {
+		dev_info(&udev->dev, "Enabling HT_20_40 on the 2.4GHz band\n");
+		sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+	}
+	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+
+	hw->wiphy->rts_threshold = 2347;
+
+	SET_IEEE80211_DEV(priv->hw, &interface->dev);
+	SET_IEEE80211_PERM_ADDR(hw, priv->mac_addr);
+
+	hw->extra_tx_headroom = sizeof(struct rtl8xxxu_tx_desc);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	/*
+	 * The firmware handles rate control
+	 */
+	ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+
+	ret = ieee80211_register_hw(priv->hw);
+	if (ret) {
+		dev_err(&udev->dev, "%s: Failed to register: %i\n",
+			__func__, ret);
+		goto exit;
+	}
+
+exit:
+	if (ret < 0)
+		usb_put_dev(udev);
+	return ret;
+}
+
+static void rtl8xxxu_disconnect(struct usb_interface *interface)
+{
+	struct rtl8xxxu_priv *priv;
+	struct ieee80211_hw *hw;
+
+	hw = usb_get_intfdata(interface);
+	priv = hw->priv;
+
+	rtl8xxxu_disable_device(hw);
+	usb_set_intfdata(interface, NULL);
+
+	dev_info(&priv->udev->dev, "disconnecting\n");
+
+	ieee80211_unregister_hw(hw);
+
+	kfree(priv->fw_data);
+	mutex_destroy(&priv->usb_buf_mutex);
+	mutex_destroy(&priv->h2c_mutex);
+
+	usb_put_dev(priv->udev);
+	ieee80211_free_hw(hw);
+}
+
+static struct rtl8xxxu_fileops rtl8723au_fops = {
+	.parse_efuse = rtl8723au_parse_efuse,
+	.load_firmware = rtl8723au_load_firmware,
+	.power_on = rtl8723au_power_on,
+	.writeN_block_size = 1024,
+};
+
+static struct rtl8xxxu_fileops rtl8192cu_fops = {
+	.parse_efuse = rtl8192cu_parse_efuse,
+	.load_firmware = rtl8192cu_load_firmware,
+	.power_on = rtl8192cu_power_on,
+	.writeN_block_size = 128,
+};
+
+static struct usb_device_id dev_table[] = {
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8724, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8723au_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1724, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8723au_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x0724, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8723au_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8178, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817f, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Larry Finger */
+{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7811, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+/* Currently untested 8188 series devices */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8170, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8177, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817a, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817b, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817d, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817e, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818a, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x1058, 0x0631, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x094c, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1102, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ba, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1e1e, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x5088, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0052, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x005c, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0eb0, 0x9071, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x103c, 0x1629, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2a, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2e, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xed17, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x648b, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0090, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops}, /* Netcore 8188RU */
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff9, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffa, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff8, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffb, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffc, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x1201, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+/* Currently untested 8192 series devices */
+{USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0950, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2102, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2103, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x341f, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe035, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ab, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0061, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0070, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016d, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07aa, 0x0056, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8178, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9021, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0xf001, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x2e2e, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0019, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0020, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3307, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3309, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330a, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2b, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x624d, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0100, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0091, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7822, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+#endif
+{ }
+};
+
+static struct usb_driver rtl8xxxu_driver = {
+	.name = DRIVER_NAME,
+	.probe = rtl8xxxu_probe,
+	.disconnect = rtl8xxxu_disconnect,
+	.id_table = dev_table,
+	.disable_hub_initiated_lpm = 1,
+};
+
+static int __init rtl8xxxu_module_init(void)
+{
+	int res;
+
+	res = usb_register(&rtl8xxxu_driver);
+	if (res < 0)
+		pr_err(DRIVER_NAME ": usb_register() failed (%i)\n", res);
+
+	return res;
+}
+
+static void __exit rtl8xxxu_module_exit(void)
+{
+	usb_deregister(&rtl8xxxu_driver);
+}
+
+MODULE_DEVICE_TABLE(usb, dev_table);
+
+module_init(rtl8xxxu_module_init);
+module_exit(rtl8xxxu_module_exit);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
new file mode 100644
index 0000000..f2a1bac
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Register definitions taken from original Realtek rtl8723au driver
+ */
+
+#include <asm/byteorder.h>
+
+#define RTL8XXXU_DEBUG_REG_WRITE	0x01
+#define RTL8XXXU_DEBUG_REG_READ		0x02
+#define RTL8XXXU_DEBUG_RFREG_WRITE	0x04
+#define RTL8XXXU_DEBUG_RFREG_READ	0x08
+#define RTL8XXXU_DEBUG_CHANNEL		0x10
+#define RTL8XXXU_DEBUG_TX		0x20
+#define RTL8XXXU_DEBUG_TX_DUMP		0x40
+#define RTL8XXXU_DEBUG_RX		0x80
+#define RTL8XXXU_DEBUG_RX_DUMP		0x100
+#define RTL8XXXU_DEBUG_USB		0x200
+#define RTL8XXXU_DEBUG_KEY		0x400
+#define RTL8XXXU_DEBUG_H2C		0x800
+#define RTL8XXXU_DEBUG_ACTION		0x1000
+#define RTL8XXXU_DEBUG_EFUSE		0x2000
+
+#define RTW_USB_CONTROL_MSG_TIMEOUT	500
+#define RTL8XXXU_MAX_REG_POLL		500
+#define	USB_INTR_CONTENT_LENGTH		56
+
+#define RTL8XXXU_OUT_ENDPOINTS		3
+
+#define REALTEK_USB_READ		0xc0
+#define REALTEK_USB_WRITE		0x40
+#define REALTEK_USB_CMD_REQ		0x05
+#define REALTEK_USB_CMD_IDX		0x00
+
+#define TX_TOTAL_PAGE_NUM		0xf8
+/* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
+#define TX_PAGE_NUM_PUBQ		0xe7
+#define TX_PAGE_NUM_HI_PQ		0x0c
+#define TX_PAGE_NUM_LO_PQ		0x02
+#define TX_PAGE_NUM_NORM_PQ		0x02
+
+#define RTL_FW_PAGE_SIZE		4096
+#define RTL8XXXU_FIRMWARE_POLL_MAX	1000
+
+#define RTL8723A_CHANNEL_GROUPS		3
+#define RTL8723A_MAX_RF_PATHS		2
+#define RF6052_MAX_TX_PWR		0x3f
+
+#define EFUSE_MAP_LEN_8723A		256
+#define EFUSE_MAX_SECTION_8723A		32
+#define EFUSE_REAL_CONTENT_LEN_8723A	512
+#define EFUSE_BT_MAP_LEN_8723A		1024
+#define EFUSE_MAX_WORD_UNIT		4
+
+struct rtl8xxxu_rx_desc {
+#ifdef __LITTLE_ENDIAN
+	u32 pktlen:14;
+	u32 crc32:1;
+	u32 icverr:1;
+	u32 drvinfo_sz:4;
+	u32 security:3;
+	u32 qos:1;
+	u32 shift:2;
+	u32 phy_stats:1;
+	u32 swdec:1;
+	u32 ls:1;
+	u32 fs:1;
+	u32 eor:1;
+	u32 own:1;
+
+	u32 macid:5;
+	u32 tid:4;
+	u32 hwrsvd:4;
+	u32 amsdu:1;
+	u32 paggr:1;
+	u32 faggr:1;
+	u32 a1fit:4;
+	u32 a2fit:4;
+	u32 pam:1;
+	u32 pwr:1;
+	u32 md:1;
+	u32 mf:1;
+	u32 type:2;
+	u32 mc:1;
+	u32 bc:1;
+
+	u32 seq:12;
+	u32 frag:4;
+	u32 nextpktlen:14;
+	u32 nextind:1;
+	u32 reserved0:1;
+
+	u32 rxmcs:6;
+	u32 rxht:1;
+	u32 gf:1;
+	u32 splcp:1;
+	u32 bw:1;
+	u32 htc:1;
+	u32 eosp:1;
+	u32 bssidfit:2;
+	u32 reserved1:16;
+	u32 unicastwake:1;
+	u32 magicwake:1;
+
+	u32 pattern0match:1;
+	u32 pattern1match:1;
+	u32 pattern2match:1;
+	u32 pattern3match:1;
+	u32 pattern4match:1;
+	u32 pattern5match:1;
+	u32 pattern6match:1;
+	u32 pattern7match:1;
+	u32 pattern8match:1;
+	u32 pattern9match:1;
+	u32 patternamatch:1;
+	u32 patternbmatch:1;
+	u32 patterncmatch:1;
+	u32 reserved2:19;
+#else
+	u32 own:1;
+	u32 eor:1;
+	u32 fs:1;
+	u32 ls:1;
+	u32 swdec:1;
+	u32 phy_stats:1;
+	u32 shift:2;
+	u32 qos:1;
+	u32 security:3;
+	u32 drvinfo_sz:4;
+	u32 icverr:1;
+	u32 crc32:1;
+	u32 pktlen:14;
+
+	u32 bc:1;
+	u32 mc:1;
+	u32 type:2;
+	u32 mf:1;
+	u32 md:1;
+	u32 pwr:1;
+	u32 pam:1;
+	u32 a2fit:4;
+	u32 a1fit:4;
+	u32 faggr:1;
+	u32 paggr:1;
+	u32 amsdu:1;
+	u32 hwrsvd:4;
+	u32 tid:4;
+	u32 macid:5;
+
+	u32 reserved0:1;
+	u32 nextind:1;
+	u32 nextpktlen:14;
+	u32 frag:4;
+	u32 seq:12;
+
+	u32 magicwake:1;
+	u32 unicastwake:1;
+	u32 reserved1:16;
+	u32 bssidfit:2;
+	u32 eosp:1;
+	u32 htc:1;
+	u32 bw:1;
+	u32 splcp:1;
+	u32 gf:1;
+	u32 rxht:1;
+	u32 rxmcs:6;
+
+	u32 reserved2:19;
+	u32 patterncmatch:1;
+	u32 patternbmatch:1;
+	u32 patternamatch:1;
+	u32 pattern9match:1;
+	u32 pattern8match:1;
+	u32 pattern7match:1;
+	u32 pattern6match:1;
+	u32 pattern5match:1;
+	u32 pattern4match:1;
+	u32 pattern3match:1;
+	u32 pattern2match:1;
+	u32 pattern1match:1;
+	u32 pattern0match:1;
+#endif
+	__le32 tsfl;
+#if 0
+	u32 bassn:12;
+	u32 bavld:1;
+	u32 reserved3:19;
+#endif
+};
+
+struct rtl8xxxu_tx_desc {
+	__le16 pkt_size;
+	u8 pkt_offset;
+	u8 txdw0;
+	__le32 txdw1;
+	__le32 txdw2;
+	__le32 txdw3;
+	__le32 txdw4;
+	__le32 txdw5;
+	__le32 txdw6;
+	__le16 csum;
+	__le16 txdw7;
+};
+
+/*  CCK Rates, TxHT = 0 */
+#define DESC_RATE_1M			0x00
+#define DESC_RATE_2M			0x01
+#define DESC_RATE_5_5M			0x02
+#define DESC_RATE_11M			0x03
+
+/*  OFDM Rates, TxHT = 0 */
+#define DESC_RATE_6M			0x04
+#define DESC_RATE_9M			0x05
+#define DESC_RATE_12M			0x06
+#define DESC_RATE_18M			0x07
+#define DESC_RATE_24M			0x08
+#define DESC_RATE_36M			0x09
+#define DESC_RATE_48M			0x0a
+#define DESC_RATE_54M			0x0b
+
+/*  MCS Rates, TxHT = 1 */
+#define DESC_RATE_MCS0			0x0c
+#define DESC_RATE_MCS1			0x0d
+#define DESC_RATE_MCS2			0x0e
+#define DESC_RATE_MCS3			0x0f
+#define DESC_RATE_MCS4			0x10
+#define DESC_RATE_MCS5			0x11
+#define DESC_RATE_MCS6			0x12
+#define DESC_RATE_MCS7			0x13
+#define DESC_RATE_MCS8			0x14
+#define DESC_RATE_MCS9			0x15
+#define DESC_RATE_MCS10			0x16
+#define DESC_RATE_MCS11			0x17
+#define DESC_RATE_MCS12			0x18
+#define DESC_RATE_MCS13			0x19
+#define DESC_RATE_MCS14			0x1a
+#define DESC_RATE_MCS15			0x1b
+#define DESC_RATE_MCS15_SG		0x1c
+#define DESC_RATE_MCS32			0x20
+
+#define TXDESC_OFFSET_SZ		0
+#define TXDESC_OFFSET_SHT		16
+#if 0
+#define TXDESC_BMC			BIT(24)
+#define TXDESC_LSG			BIT(26)
+#define TXDESC_FSG			BIT(27)
+#define TXDESC_OWN			BIT(31)
+#else
+#define TXDESC_BROADMULTICAST		BIT(0)
+#define TXDESC_LAST_SEGMENT		BIT(2)
+#define TXDESC_FIRST_SEGMENT		BIT(3)
+#define TXDESC_OWN			BIT(7)
+#endif
+
+/* Word 1 */
+#define TXDESC_PKT_OFFSET_SZ		0
+#define TXDESC_AGG_ENABLE		BIT(5)
+#define TXDESC_BK			BIT(6)
+#define TXDESC_QUEUE_SHIFT		8
+#define TXDESC_QUEUE_MASK		0x1f00
+#define TXDESC_QUEUE_BK			0x2
+#define TXDESC_QUEUE_BE			0x0
+#define TXDESC_QUEUE_VI			0x5
+#define TXDESC_QUEUE_VO			0x7
+#define TXDESC_QUEUE_BEACON		0x10
+#define TXDESC_QUEUE_HIGH		0x11
+#define TXDESC_QUEUE_MGNT		0x12
+#define TXDESC_QUEUE_CMD		0x13
+#define TXDESC_QUEUE_MAX		(TXDESC_QUEUE_CMD + 1)
+
+#define DESC_RATE_ID_SHIFT		16
+#define DESC_RATE_ID_MASK		0xf
+#define TXDESC_NAVUSEHDR		BIT(20)
+#define TXDESC_SEC_RC4			0x00400000
+#define TXDESC_SEC_AES			0x00c00000
+#define TXDESC_PKT_OFFSET_SHIFT		26
+#define TXDESC_AGG_EN			BIT(29)
+#define TXDESC_HWPC			BIT(31)
+
+/* Word 2 */
+#define TXDESC_ACK_REPORT		BIT(19)
+#define TXDESC_AMPDU_DENSITY_SHIFT	20
+
+/* Word 3 */
+#define TXDESC_SEQ_SHIFT		16
+#define TXDESC_SEQ_MASK			0x0fff0000
+
+/* Word 4 */
+#define TXDESC_QOS			BIT(6)
+#define TXDESC_HW_SEQ_ENABLE		BIT(7)
+#define TXDESC_USE_DRIVER_RATE		BIT(8)
+#define TXDESC_DISABLE_DATA_FB		BIT(10)
+#define TXDESC_CTS_SELF_ENABLE		BIT(11)
+#define TXDESC_RTS_CTS_ENABLE		BIT(12)
+#define TXDESC_HW_RTS_ENABLE		BIT(13)
+#define TXDESC_PRIME_CH_OFF_LOWER	BIT(20)
+#define TXDESC_PRIME_CH_OFF_UPPER	BIT(21)
+#define TXDESC_SHORT_PREAMBLE		BIT(24)
+#define TXDESC_DATA_BW			BIT(25)
+#define TXDESC_RTS_DATA_BW		BIT(27)
+#define TXDESC_RTS_PRIME_CH_OFF_LOWER	BIT(28)
+#define TXDESC_RTS_PRIME_CH_OFF_UPPER	BIT(29)
+
+/* Word 5 */
+#define TXDESC_RTS_RATE_SHIFT		0
+#define TXDESC_RTS_RATE_MASK		0x3f
+#define TXDESC_SHORT_GI			BIT(6)
+#define TXDESC_CCX_TAG			BIT(7)
+#define TXDESC_RETRY_LIMIT_ENABLE	BIT(17)
+#define TXDESC_RETRY_LIMIT_SHIFT	18
+#define TXDESC_RETRY_LIMIT_MASK		0x00fc0000
+
+/* Word 6 */
+#define TXDESC_MAX_AGG_SHIFT		11
+
+struct phy_rx_agc_info {
+#ifdef __LITTLE_ENDIAN
+	u8	gain:7, trsw:1;
+#else
+	u8	trsw:1, gain:7;
+#endif
+};
+
+struct rtl8723au_phy_stats {
+	struct phy_rx_agc_info path_agc[RTL8723A_MAX_RF_PATHS];
+	u8	ch_corr[RTL8723A_MAX_RF_PATHS];
+	u8	cck_sig_qual_ofdm_pwdb_all;
+	u8	cck_agc_rpt_ofdm_cfosho_a;
+	u8	cck_rpt_b_ofdm_cfosho_b;
+	u8	reserved_1;
+	u8	noise_power_db_msb;
+	u8	path_cfotail[RTL8723A_MAX_RF_PATHS];
+	u8	pcts_mask[RTL8723A_MAX_RF_PATHS];
+	s8	stream_rxevm[RTL8723A_MAX_RF_PATHS];
+	u8	path_rxsnr[RTL8723A_MAX_RF_PATHS];
+	u8	noise_power_db_lsb;
+	u8	reserved_2[3];
+	u8	stream_csi[RTL8723A_MAX_RF_PATHS];
+	u8	stream_target_csi[RTL8723A_MAX_RF_PATHS];
+	s8	sig_evm;
+	u8	reserved_3;
+
+#ifdef __LITTLE_ENDIAN
+	u8	antsel_rx_keep_2:1;	/* ex_intf_flg:1; */
+	u8	sgi_en:1;
+	u8	rxsc:2;
+	u8	idle_long:1;
+	u8	r_ant_train_en:1;
+	u8	antenna_select_b:1;
+	u8	antenna_select:1;
+#else	/*  _BIG_ENDIAN_ */
+	u8	antenna_select:1;
+	u8	antenna_select_b:1;
+	u8	r_ant_train_en:1;
+	u8	idle_long:1;
+	u8	rxsc:2;
+	u8	sgi_en:1;
+	u8	antsel_rx_keep_2:1;	/* ex_intf_flg:1; */
+#endif
+};
+
+/*
+ * Regs to backup
+ */
+#define RTL8XXXU_ADDA_REGS		16
+#define RTL8XXXU_MAC_REGS		4
+#define RTL8XXXU_BB_REGS		9
+
+struct rtl8xxxu_firmware_header {
+	__le16	signature;		/*  92C0: test chip; 92C,
+					    88C0: test chip;
+					    88C1: MP A-cut;
+					    92C1: MP A-cut */
+	u8	category;		/*  AP/NIC and USB/PCI */
+	u8	function;
+
+	__le16	major_version;		/*  FW Version */
+	u8	minor_version;		/*  FW Subversion, default 0x00 */
+	u8	reserved1;
+
+	u8	month;			/*  Release time Month field */
+	u8	date;			/*  Release time Date field */
+	u8	hour;			/*  Release time Hour field */
+	u8	minute;			/*  Release time Minute field */
+
+	__le16	ramcodesize;		/*  Size of RAM code */
+	u16	reserved2;
+
+	__le32	svn_idx;		/*  SVN entry index */
+	u32	reserved3;
+
+	u32	reserved4;
+	u32	reserved5;
+
+	u8	data[0];
+};
+
+/*
+ * The 8723au has 3 channel groups: 1-3, 4-9, and 10-14
+ */
+struct rtl8723au_idx {
+#ifdef __LITTLE_ENDIAN
+	int	a:4;
+	int	b:4;
+#else
+	int	b:4;
+	int	a:4;
+#endif
+} __attribute__((packed));
+
+struct rtl8723au_efuse {
+	__le16 rtl_id;
+	u8 res0[0xe];
+	u8 cck_tx_power_index_A[3];	/* 0x10 */
+	u8 cck_tx_power_index_B[3];
+	u8 ht40_1s_tx_power_index_A[3];	/* 0x16 */
+	u8 ht40_1s_tx_power_index_B[3];
+	/*
+	 * The following entries are half-bytes split as:
+	 * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed
+	 */
+	struct rtl8723au_idx ht20_tx_power_index_diff[3];
+	struct rtl8723au_idx ofdm_tx_power_index_diff[3];
+	struct rtl8723au_idx ht40_max_power_offset[3];
+	struct rtl8723au_idx ht20_max_power_offset[3];
+	u8 channel_plan;		/* 0x28 */
+	u8 tssi_a;
+	u8 thermal_meter;
+	u8 rf_regulatory;
+	u8 rf_option_2;
+	u8 rf_option_3;
+	u8 rf_option_4;
+	u8 res7;
+	u8 version;			/* 0x30 */
+	u8 customer_id_major;
+	u8 customer_id_minor;
+	u8 xtal_k;
+	u8 chipset;			/* 0x34 */
+	u8 res8[0x82];
+	u8 vid;				/* 0xb7 */
+	u8 res9;
+	u8 pid;				/* 0xb9 */
+	u8 res10[0x0c];
+	u8 mac_addr[ETH_ALEN];		/* 0xc6 */
+	u8 res11[2];
+	u8 vendor_name[7];
+	u8 res12[2];
+	u8 device_name[0x29];		/* 0xd7 */
+};
+
+struct rtl8192cu_efuse {
+	__le16 rtl_id;
+	__le16 hpon;
+	u8 res0[2];
+	__le16 clk;
+	__le16 testr;
+	__le16 vid;
+	__le16 did;
+	__le16 svid;
+	__le16 smid;						/* 0x10 */
+	u8 res1[4];
+	u8 mac_addr[ETH_ALEN];					/* 0x16 */
+	u8 res2[2];
+	u8 vendor_name[7];
+	u8 res3[3];
+	u8 device_name[0x14];					/* 0x28 */
+	u8 res4[0x1e];						/* 0x3c */
+	u8 cck_tx_power_index_A[3];				/* 0x5a */
+	u8 cck_tx_power_index_B[3];
+	u8 ht40_1s_tx_power_index_A[3];				/* 0x60 */
+	u8 ht40_1s_tx_power_index_B[3];
+	/*
+	 * The following entries are half-bytes split as:
+	 * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed
+	 */
+	struct rtl8723au_idx ht40_2s_tx_power_index_diff[3];
+	struct rtl8723au_idx ht20_tx_power_index_diff[3];	/* 0x69 */
+	struct rtl8723au_idx ofdm_tx_power_index_diff[3];
+	struct rtl8723au_idx ht40_max_power_offset[3];		/* 0x6f */
+	struct rtl8723au_idx ht20_max_power_offset[3];
+	u8 channel_plan;					/* 0x75 */
+	u8 tssi_a;
+	u8 tssi_b;
+	u8 thermal_meter;	/* xtal_k */			/* 0x78 */
+	u8 rf_regulatory;
+	u8 rf_option_2;
+	u8 rf_option_3;
+	u8 rf_option_4;
+	u8 res5[1];						/* 0x7d */
+	u8 version;
+	u8 customer_id;
+};
+
+struct rtl8xxxu_reg8val {
+	u16 reg;
+	u8 val;
+};
+
+struct rtl8xxxu_reg32val {
+	u16 reg;
+	u32 val;
+};
+
+struct rtl8xxxu_rfregval {
+	u8 reg;
+	u32 val;
+};
+
+enum rtl8xxxu_rfpath {
+	RF_A = 0,
+	RF_B = 1,
+};
+
+struct rtl8xxxu_rfregs {
+	u16 hssiparm1;
+	u16 hssiparm2;
+	u16 lssiparm;
+	u16 hspiread;
+	u16 lssiread;
+	u16 rf_sw_ctrl;
+};
+
+#define H2C_MAX_MBOX			4
+#define H2C_EXT				BIT(7)
+#define H2C_SET_POWER_MODE		1
+#define H2C_JOIN_BSS_REPORT		2
+#define  H2C_JOIN_BSS_DISCONNECT	0
+#define  H2C_JOIN_BSS_CONNECT		1
+#define H2C_SET_RSSI			5
+#define H2C_SET_RATE_MASK		(6 | H2C_EXT)
+
+struct h2c_cmd {
+	union {
+		struct {
+			u8 cmd;
+			u8 data[5];
+		} __packed cmd;
+		struct {
+			__le32 data;
+			__le16 ext;
+		} __packed raw;
+		struct {
+			u8 cmd;
+			u8 data;
+			u8 pad[4];
+		} __packed joinbss;
+		struct {
+			u8 cmd;
+			__le16 mask_hi;
+			u8 arg;
+			__le16 mask_lo;
+		} __packed ramask;
+	};
+};
+
+struct rtl8xxxu_fileops;
+
+struct rtl8xxxu_priv {
+	struct ieee80211_hw *hw;
+	struct usb_device *udev;
+	struct rtl8xxxu_fileops *fops;
+
+	spinlock_t tx_urb_lock;
+	struct list_head tx_urb_free_list;
+	int tx_urb_free_count;
+	bool tx_stopped;
+
+	spinlock_t rx_urb_lock;
+	struct list_head rx_urb_pending_list;
+	int rx_urb_pending_count;
+	bool shutdown;
+	struct work_struct rx_urb_wq;
+
+	u8 mac_addr[ETH_ALEN];
+	char chip_name[8];
+	u8 cck_tx_power_index_A[3];	/* 0x10 */
+	u8 cck_tx_power_index_B[3];
+	u8 ht40_1s_tx_power_index_A[3];	/* 0x16 */
+	u8 ht40_1s_tx_power_index_B[3];
+	/*
+	 * The following entries are half-bytes split as:
+	 * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed
+	 */
+	struct rtl8723au_idx ht40_2s_tx_power_index_diff[3];
+	struct rtl8723au_idx ht20_tx_power_index_diff[3];
+	struct rtl8723au_idx ofdm_tx_power_index_diff[3];
+	struct rtl8723au_idx ht40_max_power_offset[3];
+	struct rtl8723au_idx ht20_max_power_offset[3];
+	u32 chip_cut:4;
+	u32 rom_rev:4;
+	u32 has_wifi:1;
+	u32 has_bluetooth:1;
+	u32 enable_bluetooth:1;
+	u32 has_gps:1;
+	u32 hi_pa:1;
+	u32 vendor_umc:1;
+	u32 has_polarity_ctrl:1;
+	u32 has_eeprom:1;
+	u32 boot_eeprom:1;
+	u32 ep_tx_high_queue:1;
+	u32 ep_tx_normal_queue:1;
+	u32 ep_tx_low_queue:1;
+	u32 path_a_hi_power:1;
+	u32 path_a_rf_paths:4;
+	unsigned int pipe_interrupt;
+	unsigned int pipe_in;
+	unsigned int pipe_out[TXDESC_QUEUE_MAX];
+	u8 out_ep[RTL8XXXU_OUT_ENDPOINTS];
+	u8 path_a_ig_value;
+	u8 ep_tx_count;
+	u8 rf_paths;
+	u8 rx_paths;
+	u8 tx_paths;
+	u32 rf_mode_ag[2];
+	u32 rege94;
+	u32 rege9c;
+	u32 regeb4;
+	u32 regebc;
+	int next_mbox;
+	int nr_out_eps;
+
+	struct mutex h2c_mutex;
+
+	struct usb_anchor rx_anchor;
+	struct usb_anchor tx_anchor;
+	struct usb_anchor int_anchor;
+	struct rtl8xxxu_firmware_header *fw_data;
+	size_t fw_size;
+	struct mutex usb_buf_mutex;
+	union {
+		__le32 val32;
+		__le16 val16;
+		u8 val8;
+	} usb_buf;
+	union {
+		u8 raw[EFUSE_MAP_LEN_8723A];
+		struct rtl8723au_efuse efuse8723;
+		struct rtl8192cu_efuse efuse8192;
+	} efuse_wifi;
+	u32 adda_backup[RTL8XXXU_ADDA_REGS];
+	u32 mac_backup[RTL8XXXU_MAC_REGS];
+	u32 bb_backup[RTL8XXXU_BB_REGS];
+	u32 bb_recovery_backup[RTL8XXXU_BB_REGS];
+	u32 rtlchip;
+	u8 pi_enabled:1;
+	u8 iqk_initialized:1;
+	u8 int_buf[USB_INTR_CONTENT_LENGTH];
+};
+
+struct rtl8xxxu_rx_urb {
+	struct urb urb;
+	struct ieee80211_hw *hw;
+	struct list_head list;
+};
+
+struct rtl8xxxu_tx_urb {
+	struct urb urb;
+	struct ieee80211_hw *hw;
+	struct list_head list;
+};
+
+struct rtl8xxxu_fileops {
+	int (*parse_efuse) (struct rtl8xxxu_priv *priv);
+	int (*load_firmware) (struct rtl8xxxu_priv *priv);
+	int (*power_on) (struct rtl8xxxu_priv *priv);
+	int writeN_block_size;
+};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
new file mode 100644
index 0000000..23208f7
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -0,0 +1,981 @@
+/*
+ * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Register definitions taken from original Realtek rtl8723au driver
+ */
+
+/* 0x0000 ~ 0x00FF	System Configuration */
+#define REG_SYS_ISO_CTRL		0x0000
+#define  SYS_ISO_MD2PP			BIT(0)
+#define  SYS_ISO_ANALOG_IPS		BIT(5)
+#define  SYS_ISO_DIOR			BIT(9)
+#define  SYS_ISO_PWC_EV25V		BIT(14)
+#define  SYS_ISO_PWC_EV12V		BIT(15)
+
+#define REG_SYS_FUNC			0x0002
+#define  SYS_FUNC_BBRSTB		BIT(0)
+#define  SYS_FUNC_BB_GLB_RSTN		BIT(1)
+#define  SYS_FUNC_USBA			BIT(2)
+#define  SYS_FUNC_UPLL			BIT(3)
+#define  SYS_FUNC_USBD			BIT(4)
+#define  SYS_FUNC_DIO_PCIE		BIT(5)
+#define  SYS_FUNC_PCIEA			BIT(6)
+#define  SYS_FUNC_PPLL			BIT(7)
+#define  SYS_FUNC_PCIED			BIT(8)
+#define  SYS_FUNC_DIOE			BIT(9)
+#define  SYS_FUNC_CPU_ENABLE		BIT(10)
+#define  SYS_FUNC_DCORE			BIT(11)
+#define  SYS_FUNC_ELDR			BIT(12)
+#define  SYS_FUNC_DIO_RF		BIT(13)
+#define  SYS_FUNC_HWPDN			BIT(14)
+#define  SYS_FUNC_MREGEN		BIT(15)
+
+#define REG_APS_FSMCO			0x0004
+#define  APS_FSMCO_PFM_ALDN		BIT(1)
+#define  APS_FSMCO_PFM_WOWL		BIT(3)
+#define  APS_FSMCO_ENABLE_POWERDOWN	BIT(4)
+#define  APS_FSMCO_MAC_ENABLE		BIT(8)
+#define  APS_FSMCO_MAC_OFF		BIT(9)
+#define  APS_FSMCO_HW_SUSPEND		BIT(11)
+#define  APS_FSMCO_PCIE			BIT(12)
+#define  APS_FSMCO_HW_POWERDOWN		BIT(15)
+#define  APS_FSMCO_WLON_RESET		BIT(16)
+
+#define REG_SYS_CLKR			0x0008
+#define  SYS_CLK_ANAD16V_ENABLE		BIT(0)
+#define  SYS_CLK_ANA8M			BIT(1)
+#define  SYS_CLK_MACSLP			BIT(4)
+#define  SYS_CLK_LOADER_ENABLE		BIT(5)
+#define  SYS_CLK_80M_SSC_DISABLE	BIT(7)
+#define  SYS_CLK_80M_SSC_ENABLE_HO	BIT(8)
+#define  SYS_CLK_PHY_SSC_RSTB		BIT(9)
+#define  SYS_CLK_SEC_CLK_ENABLE		BIT(10)
+#define  SYS_CLK_MAC_CLK_ENABLE		BIT(11)
+#define  SYS_CLK_ENABLE			BIT(12)
+#define  SYS_CLK_RING_CLK_ENABLE	BIT(13)
+
+#define REG_9346CR			0x000a
+#define  EEPROM_BOOT			BIT(4)
+#define  EEPROM_ENABLE			BIT(5)
+
+#define REG_EE_VPD			0x000c
+#define REG_AFE_MISC			0x0010
+#define REG_SPS0_CTRL			0x0011
+#define REG_SPS_OCP_CFG			0x0018
+#define REG_RSV_CTRL			0x001c
+
+#define REG_RF_CTRL			0x001f
+#define  RF_ENABLE			BIT(0)
+#define  RF_RSTB			BIT(1)
+#define  RF_SDMRSTB			BIT(2)
+
+#define REG_LDOA15_CTRL			0x0020
+#define  LDOA15_ENABLE			BIT(0)
+#define  LDOA15_STANDBY			BIT(1)
+#define  LDOA15_OBUF			BIT(2)
+#define  LDOA15_REG_VOS			BIT(3)
+#define  LDOA15_VOADJ_SHIFT		4
+
+#define REG_LDOV12D_CTRL		0x0021
+#define  LDOV12D_ENABLE			BIT(0)
+#define  LDOV12D_STANDBY		BIT(1)
+#define  LDOV12D_VADJ_SHIFT		4
+
+#define REG_LDOHCI12_CTRL		0x0022
+
+#define REG_LPLDO_CTRL			0x0023
+#define  LPLDO_HSM			BIT(2)
+#define  LPLDO_LSM_DIS			BIT(3)
+
+#define REG_AFE_XTAL_CTRL		0x0024
+#define  AFE_XTAL_ENABLE		BIT(0)
+#define  AFE_XTAL_B_SELECT		BIT(1)
+#define  AFE_XTAL_GATE_USB		BIT(8)
+#define  AFE_XTAL_GATE_AFE		BIT(11)
+#define  AFE_XTAL_RF_GATE		BIT(14)
+#define  AFE_XTAL_GATE_DIG		BIT(17)
+#define  AFE_XTAL_BT_GATE		BIT(20)
+
+#define REG_AFE_PLL_CTRL		0x0028
+#define  AFE_PLL_ENABLE			BIT(0)
+#define  AFE_PLL_320_ENABLE		BIT(1)
+#define  APE_PLL_FREF_SELECT		BIT(2)
+#define  AFE_PLL_EDGE_SELECT		BIT(3)
+#define  AFE_PLL_WDOGB			BIT(4)
+#define  AFE_PLL_LPF_ENABLE		BIT(5)
+
+#define REG_MAC_PHY_CTRL		0x002c
+
+#define REG_EFUSE_CTRL			0x0030
+#define REG_EFUSE_TEST			0x0034
+#define  EFUSE_TRPT			BIT(7)
+	/*  00: Wifi Efuse, 01: BT Efuse0, 10: BT Efuse1, 11: BT Efuse2 */
+#define  EFUSE_CELL_SEL			(BIT(8) | BIT(9))
+#define  EFUSE_LDOE25_ENABLE		BIT(31)
+#define  EFUSE_SELECT_MASK		0x0300
+#define  EFUSE_WIFI_SELECT		0x0000
+#define  EFUSE_BT0_SELECT		0x0100
+#define  EFUSE_BT1_SELECT		0x0200
+#define  EFUSE_BT2_SELECT		0x0300
+
+#define  EFUSE_ACCESS_ENABLE		0x69	/* RTL8723 only */
+#define  EFUSE_ACCESS_DISABLE		0x00	/* RTL8723 only */
+
+#define REG_PWR_DATA			0x0038
+#define REG_CAL_TIMER			0x003c
+#define REG_ACLK_MON			0x003e
+#define REG_GPIO_MUXCFG			0x0040
+#define REG_GPIO_IO_SEL			0x0042
+#define REG_MAC_PINMUX_CFG		0x0043
+#define REG_GPIO_PIN_CTRL		0x0044
+#define REG_GPIO_INTM			0x0048
+#define REG_LEDCFG0			0x004c
+#define REG_LEDCFG1			0x004d
+#define REG_LEDCFG2			0x004e
+#define  LEDCFG2_DPDT_SELECT		BIT(7)
+#define REG_LEDCFG3			0x004f
+#define REG_LEDCFG			REG_LEDCFG2
+#define REG_FSIMR			0x0050
+#define REG_FSISR			0x0054
+#define REG_HSIMR			0x0058
+#define REG_HSISR			0x005c
+/*  RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */
+#define REG_GPIO_PIN_CTRL_2		0x0060
+/*  RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
+#define REG_GPIO_IO_SEL_2		0x0062
+
+/*  RTL8723 only WIFI/BT/GPS Multi-Function control source. */
+#define REG_MULTI_FUNC_CTRL		0x0068
+
+#define  MULTI_FN_WIFI_HW_PWRDOWN_EN	BIT(0)	/* Enable GPIO[9] as WiFi HW
+						   powerdown source */
+#define  MULTI_FN_WIFI_HW_PWRDOWN_SL	BIT(1)	/* WiFi HW powerdown polarity
+						   control */
+#define  MULTI_WIFI_FUNC_EN		BIT(2)	/* WiFi function enable */
+
+#define  MULTI_WIFI_HW_ROF_EN		BIT(3)	/* Enable GPIO[9] as WiFi RF HW
+						   powerdown source */
+#define  MULTI_BT_HW_PWRDOWN_EN		BIT(16)	/* Enable GPIO[11] as BT HW
+						   powerdown source */
+#define  MULTI_BT_HW_PWRDOWN_SL		BIT(17)	/* BT HW powerdown polarity
+						   control */
+#define  MULTI_BT_FUNC_EN		BIT(18)	/* BT function enable */
+#define  MULTI_BT_HW_ROF_EN		BIT(19)	/* Enable GPIO[11] as BT/GPS
+						   RF HW powerdown source */
+#define  MULTI_GPS_HW_PWRDOWN_EN	BIT(20)	/* Enable GPIO[10] as GPS HW
+						   powerdown source */
+#define  MULTI_GPS_HW_PWRDOWN_SL	BIT(21)	/* GPS HW powerdown polarity
+						   control */
+#define  MULTI_GPS_FUNC_EN		BIT(22)	/* GPS function enable */
+
+#define REG_MCU_FW_DL			0x0080
+#define  MCU_FW_DL_ENABLE		BIT(0)
+#define  MCU_FW_DL_READY		BIT(1)
+#define  MCU_FW_DL_CSUM_REPORT		BIT(2)
+#define  MCU_MAC_INIT_READY		BIT(3)
+#define  MCU_BB_INIT_READY		BIT(4)
+#define  MCU_RF_INIT_READY		BIT(5)
+#define  MCU_WINT_INIT_READY		BIT(6)
+#define  MCU_FW_RAM_SEL			BIT(7)	/* 1: RAM, 0:ROM */
+#define  MCU_CP_RESET			BIT(23)
+
+#define REG_HMBOX_EXT_0			0x0088
+#define REG_HMBOX_EXT_1			0x008a
+#define REG_HMBOX_EXT_2			0x008c
+#define REG_HMBOX_EXT_3			0x008e
+/*  Host suspend counter on FPGA platform */
+#define REG_HOST_SUSP_CNT		0x00bc
+/*  Efuse access protection for RTL8723 */
+#define REG_EFUSE_ACCESS		0x00cf
+#define REG_BIST_SCAN			0x00d0
+#define REG_BIST_RPT			0x00d4
+#define REG_BIST_ROM_RPT		0x00d8
+#define REG_USB_SIE_INTF		0x00e0
+#define REG_PCIE_MIO_INTF		0x00e4
+#define REG_PCIE_MIO_INTD		0x00e8
+#define REG_HPON_FSM			0x00ec
+#define  HPON_FSM_BONDING_MASK		(BIT(22) | BIT(23))
+#define  HPON_FSM_BONDING_1T2R		BIT(22)
+#define REG_SYS_CFG			0x00f0
+#define  SYS_CFG_XCLK_VLD		BIT(0)
+#define  SYS_CFG_ACLK_VLD		BIT(1)
+#define  SYS_CFG_UCLK_VLD		BIT(2)
+#define  SYS_CFG_PCLK_VLD		BIT(3)
+#define  SYS_CFG_PCIRSTB		BIT(4)
+#define  SYS_CFG_V15_VLD		BIT(5)
+#define  SYS_CFG_TRP_B15V_EN		BIT(7)
+#define  SYS_CFG_SIC_IDLE		BIT(8)
+#define  SYS_CFG_BD_MAC2		BIT(9)
+#define  SYS_CFG_BD_MAC1		BIT(10)
+#define  SYS_CFG_IC_MACPHY_MODE		BIT(11)
+#define  SYS_CFG_CHIP_VER		(BIT(12) | BIT(13) | BIT(14) | BIT(15))
+#define  SYS_CFG_BT_FUNC		BIT(16)
+#define  SYS_CFG_VENDOR_ID		BIT(19)
+#define  SYS_CFG_PAD_HWPD_IDN		BIT(22)
+#define  SYS_CFG_TRP_VAUX_EN		BIT(23)
+#define  SYS_CFG_TRP_BT_EN		BIT(24)
+#define  SYS_CFG_BD_PKG_SEL		BIT(25)
+#define  SYS_CFG_BD_HCI_SEL		BIT(26)
+#define  SYS_CFG_TYPE_ID		BIT(27)
+#define  SYS_CFG_RTL_ID			BIT(23) /*  TestChip ID,
+						    1:Test(RLE); 0:MP(RL) */
+#define  SYS_CFG_SPS_SEL		BIT(24) /*  1:LDO regulator mode;
+						    0:Switching regulator mode*/
+#define  SYS_CFG_CHIP_VERSION_MASK	0xf000	/* Bit 12 - 15 */
+#define  SYS_CFG_CHIP_VERSION_SHIFT	12
+
+#define REG_GPIO_OUTSTS			0x00f4	/*  For RTL8723 only. */
+#define  GPIO_EFS_HCI_SEL		(BIT(0) | BIT(1))
+#define  GPIO_PAD_HCI_SEL		(BIT(2) | BIT(3))
+#define  GPIO_HCI_SEL			(BIT(4) | BIT(5))
+#define  GPIO_PKG_SEL_HCI		BIT(6)
+#define  GPIO_FEN_GPS			BIT(7)
+#define  GPIO_FEN_BT			BIT(8)
+#define  GPIO_FEN_WL			BIT(9)
+#define  GPIO_FEN_PCI			BIT(10)
+#define  GPIO_FEN_USB			BIT(11)
+#define  GPIO_BTRF_HWPDN_N		BIT(12)
+#define  GPIO_WLRF_HWPDN_N		BIT(13)
+#define  GPIO_PDN_BT_N			BIT(14)
+#define  GPIO_PDN_GPS_N			BIT(15)
+#define  GPIO_BT_CTL_HWPDN		BIT(16)
+#define  GPIO_GPS_CTL_HWPDN		BIT(17)
+#define  GPIO_PPHY_SUSB			BIT(20)
+#define  GPIO_UPHY_SUSB			BIT(21)
+#define  GPIO_PCI_SUSEN			BIT(22)
+#define  GPIO_USB_SUSEN			BIT(23)
+#define  GPIO_RF_RL_ID			(BIT(31) | BIT(30) | BIT(29) | BIT(28))
+
+/* 0x0100 ~ 0x01FF	MACTOP General Configuration */
+#define REG_CR				0x0100
+#define  CR_HCI_TXDMA_ENABLE		BIT(0)
+#define  CR_HCI_RXDMA_ENABLE		BIT(1)
+#define  CR_TXDMA_ENABLE		BIT(2)
+#define  CR_RXDMA_ENABLE		BIT(3)
+#define  CR_PROTOCOL_ENABLE		BIT(4)
+#define  CR_SCHEDULE_ENABLE		BIT(5)
+#define  CR_MAC_TX_ENABLE		BIT(6)
+#define  CR_MAC_RX_ENABLE		BIT(7)
+#define  CR_SW_BEACON_ENABLE		BIT(8)
+#define  CR_SECURITY_ENABLE		BIT(9)
+#define  CR_CALTIMER_ENABLE		BIT(10)
+
+/* Media Status Register */
+#define REG_MSR				0x0102
+#define  MSR_LINKTYPE_MASK		0x3
+#define  MSR_LINKTYPE_NONE		0x0
+#define  MSR_LINKTYPE_ADHOC		0x1
+#define  MSR_LINKTYPE_STATION		0x2
+#define  MSR_LINKTYPE_AP		0x3
+
+#define REG_PBP				0x0104
+#define  PBP_PAGE_SIZE_RX_SHIFT		0
+#define  PBP_PAGE_SIZE_TX_SHIFT		4
+#define  PBP_PAGE_SIZE_64		0x0
+#define  PBP_PAGE_SIZE_128		0x1
+#define  PBP_PAGE_SIZE_256		0x2
+#define  PBP_PAGE_SIZE_512		0x3
+#define  PBP_PAGE_SIZE_1024		0x4
+
+#define REG_TRXDMA_CTRL			0x010c
+#define  TRXDMA_CTRL_VOQ_SHIFT		4
+#define  TRXDMA_CTRL_VIQ_SHIFT		6
+#define  TRXDMA_CTRL_BEQ_SHIFT		8
+#define  TRXDMA_CTRL_BKQ_SHIFT		10
+#define  TRXDMA_CTRL_MGQ_SHIFT		12
+#define  TRXDMA_CTRL_HIQ_SHIFT		14
+#define  TRXDMA_QUEUE_LOW		1
+#define  TRXDMA_QUEUE_NORMAL		2
+#define  TRXDMA_QUEUE_HIGH		3
+
+#define REG_TRXFF_BNDY			0x0114
+#define REG_TRXFF_STATUS		0x0118
+#define REG_RXFF_PTR			0x011c
+#define REG_HIMR			0x0120
+#define REG_HISR			0x0124
+#define REG_HIMRE			0x0128
+#define REG_HISRE			0x012c
+#define REG_CPWM			0x012f
+#define REG_FWIMR			0x0130
+#define REG_FWISR			0x0134
+#define REG_PKTBUF_DBG_CTRL		0x0140
+#define REG_PKTBUF_DBG_DATA_L		0x0144
+#define REG_PKTBUF_DBG_DATA_H		0x0148
+
+#define REG_TC0_CTRL			0x0150
+#define REG_TC1_CTRL			0x0154
+#define REG_TC2_CTRL			0x0158
+#define REG_TC3_CTRL			0x015c
+#define REG_TC4_CTRL			0x0160
+#define REG_TCUNIT_BASE			0x0164
+#define REG_MBIST_START			0x0174
+#define REG_MBIST_DONE			0x0178
+#define REG_MBIST_FAIL			0x017c
+#define REG_C2HEVT_MSG_NORMAL		0x01a0
+#define REG_C2HEVT_CLEAR		0x01af
+#define REG_C2HEVT_MSG_TEST		0x01b8
+#define REG_MCUTST_1			0x01c0
+#define REG_FMTHR			0x01c8
+#define REG_HMTFR			0x01cc
+#define REG_HMBOX_0			0x01d0
+#define REG_HMBOX_1			0x01d4
+#define REG_HMBOX_2			0x01d8
+#define REG_HMBOX_3			0x01dc
+
+#define REG_LLT_INIT			0x01e0
+#define  LLT_OP_INACTIVE		0x0
+#define  LLT_OP_WRITE			(0x1 << 30)
+#define  LLT_OP_READ			(0x2 << 30)
+#define  LLT_OP_MASK			(0x3 << 30)
+
+#define REG_BB_ACCEESS_CTRL		0x01e8
+#define REG_BB_ACCESS_DATA		0x01ec
+
+/* 0x0200 ~ 0x027F	TXDMA Configuration */
+#define REG_RQPN			0x0200
+#define  RQPN_HI_PQ_SHIFT		0
+#define  RQPN_LO_PQ_SHIFT		8
+#define  RQPN_NORM_PQ_SHIFT		16
+#define  RQPN_LOAD			BIT(31)
+
+#define REG_FIFOPAGE			0x0204
+#define REG_TDECTRL			0x0208
+#define REG_TXDMA_OFFSET_CHK		0x020c
+#define REG_TXDMA_STATUS		0x0210
+#define REG_RQPN_NPQ			0x0214
+
+/* 0x0280 ~ 0x02FF	RXDMA Configuration */
+#define REG_RXDMA_AGG_PG_TH		0x0280
+#define REG_RXPKT_NUM			0x0284
+#define REG_RXDMA_STATUS		0x0288
+
+#define REG_RF_BB_CMD_ADDR		0x02c0
+#define REG_RF_BB_CMD_DATA		0x02c4
+
+/*  spec version 11 */
+/* 0x0400 ~ 0x047F	Protocol Configuration */
+#define REG_VOQ_INFORMATION		0x0400
+#define REG_VIQ_INFORMATION		0x0404
+#define REG_BEQ_INFORMATION		0x0408
+#define REG_BKQ_INFORMATION		0x040c
+#define REG_MGQ_INFORMATION		0x0410
+#define REG_HGQ_INFORMATION		0x0414
+#define REG_BCNQ_INFORMATION		0x0418
+
+#define REG_CPU_MGQ_INFORMATION		0x041c
+#define REG_FWHW_TXQ_CTRL		0x0420
+#define  FWHW_TXQ_CTRL_AMPDU_RETRY	BIT(7)
+#define  FWHW_TXQ_CTRL_XMIT_MGMT_ACK	BIT(12)
+
+#define REG_HWSEQ_CTRL			0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY		0x0424
+#define REG_TXPKTBUF_MGQ_BDNY		0x0425
+#define REG_LIFETIME_EN			0x0426
+#define REG_MULTI_BCNQ_OFFSET		0x0427
+
+#define REG_SPEC_SIFS			0x0428
+#define  SPEC_SIFS_CCK_MASK		0x00ff
+#define  SPEC_SIFS_CCK_SHIFT		0
+#define  SPEC_SIFS_OFDM_MASK		0xff00
+#define  SPEC_SIFS_OFDM_SHIFT		8
+
+#define REG_RETRY_LIMIT			0x042a
+#define  RETRY_LIMIT_LONG_SHIFT		0
+#define  RETRY_LIMIT_LONG_MASK		0x003f
+#define  RETRY_LIMIT_SHORT_SHIFT	8
+#define  RETRY_LIMIT_SHORT_MASK		0x3f00
+
+#define REG_DARFRC			0x0430
+#define REG_RARFRC			0x0438
+#define REG_RESPONSE_RATE_SET		0x0440
+#define  RESPONSE_RATE_BITMAP_ALL	0xfffff
+#define  RESPONSE_RATE_RRSR_CCK_ONLY_1M	0xffff1
+#define  RSR_1M				BIT(0)
+#define  RSR_2M				BIT(1)
+#define  RSR_5_5M			BIT(2)
+#define  RSR_11M			BIT(3)
+#define  RSR_6M				BIT(4)
+#define  RSR_9M				BIT(5)
+#define  RSR_12M			BIT(6)
+#define  RSR_18M			BIT(7)
+#define  RSR_24M			BIT(8)
+#define  RSR_36M			BIT(9)
+#define  RSR_48M			BIT(10)
+#define  RSR_54M			BIT(11)
+#define  RSR_MCS0			BIT(12)
+#define  RSR_MCS1			BIT(13)
+#define  RSR_MCS2			BIT(14)
+#define  RSR_MCS3			BIT(15)
+#define  RSR_MCS4			BIT(16)
+#define  RSR_MCS5			BIT(17)
+#define  RSR_MCS6			BIT(18)
+#define  RSR_MCS7			BIT(19)
+#define  RSR_RSC_LOWER_SUB_CHANNEL	BIT(21)	/* 0x200000 */
+#define  RSR_RSC_UPPER_SUB_CHANNEL	BIT(22)	/* 0x400000 */
+#define  RSR_RSC_BANDWIDTH_40M		(RSR_RSC_UPPER_SUB_CHANNEL | \
+					 RSR_RSC_LOWER_SUB_CHANNEL)
+#define  RSR_ACK_SHORT_PREAMBLE		BIT(23)
+
+#define REG_ARFR0			0x0444
+#define REG_ARFR1			0x0448
+#define REG_ARFR2			0x044c
+#define REG_ARFR3			0x0450
+#define REG_AGGLEN_LMT			0x0458
+#define REG_AMPDU_MIN_SPACE		0x045c
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD	0x045d
+#define REG_FAST_EDCA_CTRL		0x0460
+#define REG_RD_RESP_PKT_TH		0x0463
+#define REG_INIRTS_RATE_SEL		0x0480
+#define REG_INIDATA_RATE_SEL		0x0484
+
+#define REG_POWER_STATUS		0x04a4
+#define REG_POWER_STAGE1		0x04b4
+#define REG_POWER_STAGE2		0x04b8
+#define REG_PKT_VO_VI_LIFE_TIME		0x04c0
+#define REG_PKT_BE_BK_LIFE_TIME		0x04c2
+#define REG_STBC_SETTING		0x04c4
+#define REG_PROT_MODE_CTRL		0x04c8
+#define REG_MAX_AGGR_NUM		0x04ca
+#define REG_RTS_MAX_AGGR_NUM		0x04cb
+#define REG_BAR_MODE_CTRL		0x04cc
+#define REG_RA_TRY_RATE_AGG_LMT		0x04cf
+#define REG_NQOS_SEQ			0x04dc
+#define REG_QOS_SEQ			0x04de
+#define REG_NEED_CPU_HANDLE		0x04e0
+#define REG_PKT_LOSE_RPT		0x04e1
+#define REG_PTCL_ERR_STATUS		0x04e2
+#define REG_DUMMY			0x04fc
+
+/* 0x0500 ~ 0x05FF	EDCA Configuration */
+#define REG_EDCA_VO_PARAM		0x0500
+#define REG_EDCA_VI_PARAM		0x0504
+#define REG_EDCA_BE_PARAM		0x0508
+#define REG_EDCA_BK_PARAM		0x050c
+#define  EDCA_PARAM_ECW_MIN_SHIFT	8
+#define  EDCA_PARAM_ECW_MAX_SHIFT	12
+#define  EDCA_PARAM_TXOP_SHIFT		16
+#define REG_BEACON_TCFG			0x0510
+#define REG_PIFS			0x0512
+#define REG_RDG_PIFS			0x0513
+#define REG_SIFS_CCK			0x0514
+#define REG_SIFS_OFDM			0x0516
+#define REG_TSFTR_SYN_OFFSET		0x0518
+#define REG_AGGR_BREAK_TIME		0x051a
+#define REG_SLOT			0x051b
+#define REG_TX_PTCL_CTRL		0x0520
+#define REG_TXPAUSE			0x0522
+#define REG_DIS_TXREQ_CLR		0x0523
+#define REG_RD_CTRL			0x0524
+#define REG_TBTT_PROHIBIT		0x0540
+#define REG_RD_NAV_NXT			0x0544
+#define REG_NAV_PROT_LEN		0x0546
+
+#define REG_BEACON_CTRL			0x0550
+#define REG_BEACON_CTRL_1		0x0551
+#define  BEACON_ATIM			BIT(0)
+#define  BEACON_CTRL_MBSSID		BIT(1)
+#define  BEACON_CTRL_TX_BEACON_RPT	BIT(2)
+#define  BEACON_FUNCTION_ENABLE		BIT(3)
+#define  BEACON_DISABLE_TSF_UPDATE	BIT(4)
+
+#define REG_MBID_NUM			0x0552
+#define REG_DUAL_TSF_RST		0x0553
+#define  DUAL_TSF_RESET_TSF0		BIT(0)
+#define  DUAL_TSF_RESET_TSF1		BIT(1)
+#define  DUAL_TSF_RESET_P2P		BIT(4)
+#define  DUAL_TSF_TX_OK			BIT(5)
+
+/*  The same as REG_MBSSID_BCN_SPACE */
+#define REG_BCN_INTERVAL		0x0554
+#define REG_MBSSID_BCN_SPACE		0x0554
+
+#define REG_DRIVER_EARLY_INT		0x0558
+#define  DRIVER_EARLY_INT_TIME		5
+
+#define REG_BEACON_DMA_TIME		0x0559
+#define  BEACON_DMA_ATIME_INT_TIME	2
+
+#define REG_ATIMWND			0x055a
+#define REG_BCN_MAX_ERR			0x055d
+#define REG_RXTSF_OFFSET_CCK		0x055e
+#define REG_RXTSF_OFFSET_OFDM		0x055f
+#define REG_TSFTR			0x0560
+#define REG_TSFTR1			0x0568
+#define REG_INIT_TSFTR			0x0564
+#define REG_ATIMWND_1			0x0570
+#define REG_PSTIMER			0x0580
+#define REG_TIMER0			0x0584
+#define REG_TIMER1			0x0588
+#define REG_ACM_HW_CTRL			0x05c0
+#define  ACM_HW_CTRL_BK			BIT(0)
+#define  ACM_HW_CTRL_BE			BIT(1)
+#define  ACM_HW_CTRL_VI			BIT(2)
+#define  ACM_HW_CTRL_VO			BIT(3)
+#define REG_ACM_RST_CTRL		0x05c1
+#define REG_ACMAVG			0x05c2
+#define REG_VO_ADMTIME			0x05c4
+#define REG_VI_ADMTIME			0x05c6
+#define REG_BE_ADMTIME			0x05c8
+#define REG_EDCA_RANDOM_GEN		0x05cc
+#define REG_SCH_TXCMD			0x05d0
+
+/* define REG_FW_TSF_SYNC_CNT		0x04a0 */
+#define REG_FW_RESET_TSF_CNT_1		0x05fc
+#define REG_FW_RESET_TSF_CNT_0		0x05fd
+#define REG_FW_BCN_DIS_CNT		0x05fe
+
+/* 0x0600 ~ 0x07FF  WMAC Configuration */
+#define REG_APSD_CTRL			0x0600
+#define  APSD_CTRL_OFF			BIT(6)
+#define  APSD_CTRL_OFF_STATUS		BIT(7)
+#define REG_BW_OPMODE			0x0603
+#define  BW_OPMODE_20MHZ		BIT(2)
+#define  BW_OPMODE_5G			BIT(1)
+#define  BW_OPMODE_11J			BIT(0)
+
+#define REG_TCR				0x0604
+
+/* Receive Configuration Register */
+#define REG_RCR				0x0608
+#define  RCR_ACCEPT_AP			BIT(0)  /* Accept all unicast packet */
+#define  RCR_ACCEPT_PHYS_MATCH		BIT(1)  /* Accept phys match packet */
+#define  RCR_ACCEPT_MCAST		BIT(2)
+#define  RCR_ACCEPT_BCAST		BIT(3)
+#define  RCR_ACCEPT_ADDR3		BIT(4)  /* Accept address 3 match
+						 packet */
+#define  RCR_ACCEPT_PM			BIT(5)  /* Accept power management
+						 packet */
+#define  RCR_CHECK_BSSID_MATCH		BIT(6)  /* Accept BSSID match packet */
+#define  RCR_CHECK_BSSID_BEACON		BIT(7)  /* Accept BSSID match packet
+						 (Rx beacon, probe rsp) */
+#define  RCR_ACCEPT_CRC32		BIT(8)  /* Accept CRC32 error packet */
+#define  RCR_ACCEPT_ICV			BIT(9)  /* Accept ICV error packet */
+#define  RCR_ACCEPT_DATA_FRAME		BIT(11)
+#define  RCR_ACCEPT_CTRL_FRAME		BIT(12)
+#define  RCR_ACCEPT_MGMT_FRAME		BIT(13)
+#define  RCR_HTC_LOC_CTRL		BIT(14) /* MFC<--HTC=1 MFC-->HTC=0 */
+#define  RCR_MFBEN			BIT(22)
+#define  RCR_LSIGEN			BIT(23)
+#define  RCR_MULTI_BSSID_ENABLE		BIT(24) /* Enable Multiple BssId */
+#define  RCR_ACCEPT_BA_SSN		BIT(27) /* Accept BA SSN */
+#define  RCR_APPEND_PHYSTAT		BIT(28)
+#define  RCR_APPEND_ICV			BIT(29)
+#define  RCR_APPEND_MIC			BIT(30)
+#define  RCR_APPEND_FCS			BIT(31) /* WMAC append FCS after */
+
+#define REG_RX_PKT_LIMIT		0x060c
+#define REG_RX_DLK_TIME			0x060d
+#define REG_RX_DRVINFO_SZ		0x060f
+
+#define REG_MACID			0x0610
+#define REG_BSSID			0x0618
+#define REG_MAR				0x0620
+#define REG_MBIDCAMCFG			0x0628
+
+#define REG_USTIME_EDCA			0x0638
+#define REG_MAC_SPEC_SIFS		0x063a
+
+/*  20100719 Joseph: Hardware register definition change. (HW datasheet v54) */
+	/*  [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
+#define REG_R2T_SIFS			0x063c
+	/*  [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
+#define REG_T2T_SIFS			0x063e
+#define REG_ACKTO			0x0640
+#define REG_CTS2TO			0x0641
+#define REG_EIFS			0x0642
+
+/* WMA, BA, CCX */
+#define REG_NAV_CTRL			0x0650
+/* In units of 128us */
+#define REG_NAV_UPPER			0x0652
+#define  NAV_UPPER_UNIT			128
+
+#define REG_BACAMCMD			0x0654
+#define REG_BACAMCONTENT		0x0658
+#define REG_LBDLY			0x0660
+#define REG_FWDLY			0x0661
+#define REG_RXERR_RPT			0x0664
+#define REG_WMAC_TRXPTCL_CTL		0x0668
+
+/*  Security */
+#define REG_CAM_CMD			0x0670
+#define  CAM_CMD_POLLING		BIT(31)
+#define  CAM_CMD_WRITE			BIT(16)
+#define  CAM_CMD_KEY_SHIFT		3
+#define REG_CAM_WRITE			0x0674
+#define  CAM_WRITE_VALID		BIT(15)
+#define REG_CAM_READ			0x0678
+#define REG_CAM_DEBUG			0x067c
+#define REG_SECURITY_CFG		0x0680
+#define  SEC_CFG_TX_USE_DEFKEY		BIT(0)
+#define  SEC_CFG_RX_USE_DEFKEY		BIT(1)
+#define  SEC_CFG_TX_SEC_ENABLE		BIT(2)
+#define  SEC_CFG_RX_SEC_ENABLE		BIT(3)
+#define  SEC_CFG_SKBYA2			BIT(4)
+#define  SEC_CFG_NO_SKMC		BIT(5)
+#define  SEC_CFG_TXBC_USE_DEFKEY	BIT(6)
+#define  SEC_CFG_RXBC_USE_DEFKEY	BIT(7)
+
+/*  Power */
+#define REG_WOW_CTRL			0x0690
+#define REG_PSSTATUS			0x0691
+#define REG_PS_RX_INFO			0x0692
+#define REG_LPNAV_CTRL			0x0694
+#define REG_WKFMCAM_CMD			0x0698
+#define REG_WKFMCAM_RWD			0x069c
+#define REG_RXFLTMAP0			0x06a0
+#define REG_RXFLTMAP1			0x06a2
+#define REG_RXFLTMAP2			0x06a4
+#define REG_BCN_PSR_RPT			0x06a8
+#define REG_CALB32K_CTRL		0x06ac
+#define REG_PKT_MON_CTRL		0x06b4
+#define REG_BT_COEX_TABLE		0x06c0
+#define REG_WMAC_RESP_TXINFO		0x06d8
+
+#define REG_MACID1			0x0700
+#define REG_BSSID1			0x0708
+
+#define REG_FPGA0_RF_MODE		0x0800
+#define  FPGA_RF_MODE			BIT(0)
+#define  FPGA_RF_MODE_JAPAN		BIT(1)
+#define  FPGA_RF_MODE_CCK		BIT(24)
+#define  FPGA_RF_MODE_OFDM		BIT(25)
+
+#define REG_FPGA0_TX_INFO		0x0804
+#define REG_FPGA0_PSD_FUNC		0x0808
+#define REG_FPGA0_TX_GAIN		0x080c
+#define REG_FPGA0_RF_TIMING1		0x0810
+#define REG_FPGA0_RF_TIMING2		0x0814
+#define REG_FPGA0_POWER_SAVE		0x0818
+#define  FPGA0_PS_LOWER_CHANNEL		BIT(26)
+#define  FPGA0_PS_UPPER_CHANNEL		BIT(27)
+
+#define REG_FPGA0_XA_HSSI_PARM1		0x0820	/* RF 3 wire register */
+#define  FPGA0_HSSI_PARM1_PI		BIT(8)
+#define REG_FPGA0_XA_HSSI_PARM2		0x0824
+#define REG_FPGA0_XB_HSSI_PARM1		0x0828
+#define REG_FPGA0_XB_HSSI_PARM2		0x082c
+#define  FPGA0_HSSI_3WIRE_DATA_LEN	0x800
+#define  FPGA0_HSSI_3WIRE_ADDR_LEN	0x400
+#define  FPGA0_HSSI_PARM2_ADDR_SHIFT	23
+#define  FPGA0_HSSI_PARM2_ADDR_MASK	0x7f800000	/* 0xff << 23 */
+#define  FPGA0_HSSI_PARM2_CCK_HIGH_PWR	BIT(9)
+#define  FPGA0_HSSI_PARM2_EDGE_READ	BIT(31)
+
+#define REG_TX_AGC_B_RATE18_06		0x0830
+#define REG_TX_AGC_B_RATE54_24		0x0834
+#define REG_TX_AGC_B_CCK1_55_MCS32	0x0838
+#define REG_TX_AGC_B_MCS03_MCS00	0x083c
+
+#define REG_FPGA0_XA_LSSI_PARM		0x0840
+#define REG_FPGA0_XB_LSSI_PARM		0x0844
+#define  FPGA0_LSSI_PARM_ADDR_SHIFT	20
+#define  FPGA0_LSSI_PARM_ADDR_MASK	0x0ff00000
+#define  FPGA0_LSSI_PARM_DATA_MASK	0x000fffff
+
+#define REG_TX_AGC_B_MCS07_MCS04	0x0848
+#define REG_TX_AGC_B_MCS11_MCS08	0x084c
+
+#define REG_FPGA0_XCD_SWITCH_CTRL	0x085c
+
+#define REG_FPGA0_XA_RF_INT_OE		0x0860	/* RF Channel switch */
+#define REG_FPGA0_XB_RF_INT_OE		0x0864
+#define  FPGA0_INT_OE_ANTENNA_AB_OPEN	0x000
+#define  FPGA0_INT_OE_ANTENNA_A		BIT(8)
+#define  FPGA0_INT_OE_ANTENNA_B		BIT(9)
+#define  FPGA0_INT_OE_ANTENNA_MASK	(FPGA0_INT_OE_ANTENNA_A | \
+					 FPGA0_INT_OE_ANTENNA_B)
+
+#define REG_TX_AGC_B_MCS15_MCS12	0x0868
+#define REG_TX_AGC_B_CCK11_A_CCK2_11	0x086c
+
+#define REG_FPGA0_XAB_RF_SW_CTRL	0x0870
+#define REG_FPGA0_XA_RF_SW_CTRL		0x0870	/* 16 bit */
+#define REG_FPGA0_XB_RF_SW_CTRL		0x0872	/* 16 bit */
+#define REG_FPGA0_XCD_RF_SW_CTRL	0x0874
+#define REG_FPGA0_XC_RF_SW_CTRL		0x0874	/* 16 bit */
+#define REG_FPGA0_XD_RF_SW_CTRL		0x0876	/* 16 bit */
+#define  FPGA0_RF_3WIRE_DATA		BIT(0)
+#define  FPGA0_RF_3WIRE_CLOC		BIT(1)
+#define  FPGA0_RF_3WIRE_LOAD		BIT(2)
+#define  FPGA0_RF_3WIRE_RW		BIT(3)
+#define  FPGA0_RF_3WIRE_MASK		0xf
+#define  FPGA0_RF_RFENV			BIT(4)
+#define  FPGA0_RF_TRSW			BIT(5)	/* Useless now */
+#define  FPGA0_RF_TRSWB			BIT(6)
+#define  FPGA0_RF_ANTSW			BIT(8)
+#define  FPGA0_RF_ANTSWB		BIT(9)
+#define  FPGA0_RF_PAPE			BIT(10)
+#define  FPGA0_RF_PAPE5G		BIT(11)
+#define  FPGA0_RF_BD_CTRL_SHIFT		16
+
+#define REG_FPGA0_XAB_RF_PARM		0x0878	/* Antenna select path in ODM */
+#define REG_FPGA0_XA_RF_PARM		0x0878	/* 16 bit */
+#define REG_FPGA0_XB_RF_PARM		0x087a	/* 16 bit */
+#define REG_FPGA0_XCD_RF_PARM		0x087c
+#define REG_FPGA0_XC_RF_PARM		0x087c	/* 16 bit */
+#define REG_FPGA0_XD_RF_PARM		0x087e	/* 16 bit */
+#define  FPGA0_RF_PARM_RFA_ENABLE	BIT(1)
+#define  FPGA0_RF_PARM_RFB_ENABLE	BIT(17)
+#define  FPGA0_RF_PARM_CLK_GATE		BIT(31)
+
+#define REG_FPGA0_ANALOG1		0x0880
+#define REG_FPGA0_ANALOG2		0x0884
+#define  FPGA0_ANALOG2_20MHZ		BIT(10)
+#define REG_FPGA0_ANALOG3		0x0888
+#define REG_FPGA0_ANALOG4		0x088c
+
+#define REG_FPGA0_XA_LSSI_READBACK	0x08a0	/* Transceiver LSSI Readback */
+#define REG_FPGA0_XB_LSSI_READBACK	0x08a4
+#define REG_HSPI_XA_READBACK		0x08b8	/* Transceiver A HSPI read */
+#define REG_HSPI_XB_READBACK		0x08bc	/* Transceiver B HSPI read */
+
+#define REG_FPGA1_RF_MODE		0x0900
+
+#define REG_FPGA1_TX_INFO		0x090c
+
+#define REG_CCK0_SYSTEM			0x0a00
+#define  CCK0_SIDEBAND			BIT(4)
+
+#define REG_CCK0_AFE_SETTING		0x0a04
+
+#define REG_CONFIG_ANT_A		0x0b68
+#define REG_CONFIG_ANT_B		0x0b6c
+
+#define REG_OFDM0_TRX_PATH_ENABLE	0x0c04
+#define OFDM_RF_PATH_RX_MASK		0x0f
+#define OFDM_RF_PATH_RX_A		BIT(0)
+#define OFDM_RF_PATH_RX_B		BIT(1)
+#define OFDM_RF_PATH_RX_C		BIT(2)
+#define OFDM_RF_PATH_RX_D		BIT(3)
+#define OFDM_RF_PATH_TX_MASK		0xf0
+#define OFDM_RF_PATH_TX_A		BIT(4)
+#define OFDM_RF_PATH_TX_B		BIT(5)
+#define OFDM_RF_PATH_TX_C		BIT(6)
+#define OFDM_RF_PATH_TX_D		BIT(7)
+
+#define REG_OFDM0_TR_MUX_PAR		0x0c08
+
+#define REG_OFDM0_XA_RX_IQ_IMBALANCE	0x0c14
+#define REG_OFDM0_XB_RX_IQ_IMBALANCE	0x0c1c
+
+#define REG_OFDM0_ENERGY_CCA_THRES	0x0c4c
+
+#define REG_OFDM0_XA_AGC_CORE1		0x0c50
+#define REG_OFDM0_XA_AGC_CORE2		0x0c54
+#define REG_OFDM0_XB_AGC_CORE1		0x0c58
+#define REG_OFDM0_XB_AGC_CORE2		0x0c5c
+#define REG_OFDM0_XC_AGC_CORE1		0x0c60
+#define REG_OFDM0_XC_AGC_CORE2		0x0c64
+#define REG_OFDM0_XD_AGC_CORE1		0x0c68
+#define REG_OFDM0_XD_AGC_CORE2		0x0c6c
+#define  OFDM0_X_AGC_CORE1_IGI_MASK	0x0000007F
+
+#define REG_OFDM0_AGC_PARM1		0x0c70
+
+#define REG_OFDM0_AGCR_SSI_TABLE	0x0c78
+
+#define REG_OFDM0_XA_TX_IQ_IMBALANCE	0x0c80
+#define REG_OFDM0_XB_TX_IQ_IMBALANCE	0x0c88
+#define REG_OFDM0_XC_TX_IQ_IMBALANCE	0x0c90
+#define REG_OFDM0_XD_TX_IQ_IMBALANCE	0x0c98
+
+#define REG_OFDM0_XC_TX_AFE		0x0c94
+#define REG_OFDM0_XD_TX_AFE		0x0c9c
+
+#define REG_OFDM0_RX_IQ_EXT_ANTA	0x0ca0
+
+#define REG_OFDM1_LSTF			0x0d00
+#define  OFDM_LSTF_PRIME_CH_LOW		BIT(10)
+#define  OFDM_LSTF_PRIME_CH_HIGH	BIT(11)
+#define  OFDM_LSTF_PRIME_CH_MASK	(OFDM_LSTF_PRIME_CH_LOW | \
+					 OFDM_LSTF_PRIME_CH_HIGH)
+#define  OFDM_LSTF_CONTINUE_TX		BIT(28)
+#define  OFDM_LSTF_SINGLE_CARRIER	BIT(29)
+#define  OFDM_LSTF_SINGLE_TONE		BIT(30)
+#define  OFDM_LSTF_MASK			0x70000000
+
+#define REG_OFDM1_TRX_PATH_ENABLE	0x0d04
+
+#define REG_TX_AGC_A_RATE18_06		0x0e00
+#define REG_TX_AGC_A_RATE54_24		0x0e04
+#define REG_TX_AGC_A_CCK1_MCS32		0x0e08
+#define REG_TX_AGC_A_MCS03_MCS00	0x0e10
+#define REG_TX_AGC_A_MCS07_MCS04	0x0e14
+#define REG_TX_AGC_A_MCS11_MCS08	0x0e18
+#define REG_TX_AGC_A_MCS15_MCS12	0x0e1c
+
+#define REG_FPGA0_IQK			0x0e28
+
+#define REG_TX_IQK_TONE_A		0x0e30
+#define REG_RX_IQK_TONE_A		0x0e34
+#define REG_TX_IQK_PI_A			0x0e38
+#define REG_RX_IQK_PI_A			0x0e3c
+
+#define REG_TX_IQK			0x0e40
+#define REG_RX_IQK			0x0e44
+#define REG_IQK_AGC_PTS			0x0e48
+#define REG_IQK_AGC_RSP			0x0e4c
+#define REG_TX_IQK_TONE_B		0x0e50
+#define REG_RX_IQK_TONE_B		0x0e54
+#define REG_TX_IQK_PI_B			0x0e58
+#define REG_RX_IQK_PI_B			0x0e5c
+#define REG_IQK_AGC_CONT		0x0e60
+
+#define REG_BLUETOOTH			0x0e6c
+#define REG_RX_WAIT_CCA			0x0e70
+#define REG_TX_CCK_RFON			0x0e74
+#define REG_TX_CCK_BBON			0x0e78
+#define REG_TX_OFDM_RFON		0x0e7c
+#define REG_TX_OFDM_BBON		0x0e80
+#define REG_TX_TO_RX			0x0e84
+#define REG_TX_TO_TX			0x0e88
+#define REG_RX_CCK			0x0e8c
+
+#define REG_TX_POWER_BEFORE_IQK_A	0x0e94
+#define REG_TX_POWER_AFTER_IQK_A	0x0e9c
+
+#define REG_RX_POWER_BEFORE_IQK_A	0x0ea0
+#define REG_RX_POWER_BEFORE_IQK_A_2	0x0ea4
+#define REG_RX_POWER_AFTER_IQK_A	0x0ea8
+#define REG_RX_POWER_AFTER_IQK_A_2	0x0eac
+
+#define REG_TX_POWER_BEFORE_IQK_B	0x0eb4
+#define REG_TX_POWER_AFTER_IQK_B	0x0ebc
+
+#define REG_RX_POWER_BEFORE_IQK_B	0x0ec0
+#define REG_RX_POWER_BEFORE_IQK_B_2	0x0ec4
+#define REG_RX_POWER_AFTER_IQK_B	0x0ec8
+#define REG_RX_POWER_AFTER_IQK_B_2	0x0ecc
+
+#define REG_RX_OFDM			0x0ed0
+#define REG_RX_WAIT_RIFS		0x0ed4
+#define REG_RX_TO_RX			0x0ed8
+#define REG_STANDBY			0x0edc
+#define REG_SLEEP			0x0ee0
+#define REG_PMPD_ANAEN			0x0eec
+
+#define REG_FW_START_ADDRESS		0x1000
+
+#define REG_USB_INFO			0xfe17
+#define REG_USB_HIMR			0xfe38
+#define  USB_HIMR_TIMEOUT2		BIT(31)
+#define  USB_HIMR_TIMEOUT1		BIT(30)
+#define  USB_HIMR_PSTIMEOUT		BIT(29)
+#define  USB_HIMR_GTINT4		BIT(28)
+#define  USB_HIMR_GTINT3		BIT(27)
+#define  USB_HIMR_TXBCNERR		BIT(26)
+#define  USB_HIMR_TXBCNOK		BIT(25)
+#define  USB_HIMR_TSF_BIT32_TOGGLE	BIT(24)
+#define  USB_HIMR_BCNDMAINT3		BIT(23)
+#define  USB_HIMR_BCNDMAINT2		BIT(22)
+#define  USB_HIMR_BCNDMAINT1		BIT(21)
+#define  USB_HIMR_BCNDMAINT0		BIT(20)
+#define  USB_HIMR_BCNDOK3		BIT(19)
+#define  USB_HIMR_BCNDOK2		BIT(18)
+#define  USB_HIMR_BCNDOK1		BIT(17)
+#define  USB_HIMR_BCNDOK0		BIT(16)
+#define  USB_HIMR_HSISR_IND		BIT(15)
+#define  USB_HIMR_BCNDMAINT_E		BIT(14)
+/* RSVD	BIT(13) */
+#define  USB_HIMR_CTW_END		BIT(12)
+/* RSVD	BIT(11) */
+#define  USB_HIMR_C2HCMD		BIT(10)
+#define  USB_HIMR_CPWM2			BIT(9)
+#define  USB_HIMR_CPWM			BIT(8)
+#define  USB_HIMR_HIGHDOK		BIT(7)	/*  High Queue DMA OK
+						    Interrupt */
+#define  USB_HIMR_MGNTDOK		BIT(6)	/*  Management Queue DMA OK
+						    Interrupt */
+#define  USB_HIMR_BKDOK			BIT(5)	/*  AC_BK DMA OK Interrupt */
+#define  USB_HIMR_BEDOK			BIT(4)	/*  AC_BE DMA OK Interrupt */
+#define  USB_HIMR_VIDOK			BIT(3)	/*  AC_VI DMA OK Interrupt */
+#define  USB_HIMR_VODOK			BIT(2)	/*  AC_VO DMA Interrupt */
+#define  USB_HIMR_RDU			BIT(1)	/*  Receive Descriptor
+						    Unavailable */
+#define  USB_HIMR_ROK			BIT(0)	/*  Receive DMA OK Interrupt */
+
+#define REG_USB_SPECIAL_OPTION		0xfe55
+#define REG_USB_DMA_AGG_TO		0xfe5b
+#define REG_USB_AGG_TO			0xfe5c
+#define REG_USB_AGG_TH			0xfe5d
+
+#define REG_NORMAL_SIE_VID		0xfe60	/* 0xfe60 - 0xfe61 */
+#define REG_NORMAL_SIE_PID		0xfe62	/* 0xfe62 - 0xfe63 */
+#define REG_NORMAL_SIE_OPTIONAL		0xfe64
+#define REG_NORMAL_SIE_EP		0xfe65	/* 0xfe65 - 0xfe67 */
+#define REG_NORMAL_SIE_EP_TX		0xfe66
+#define  NORMAL_SIE_EP_TX_HIGH_MASK	0x000f
+#define  NORMAL_SIE_EP_TX_NORMAL_MASK	0x00f0
+#define  NORMAL_SIE_EP_TX_LOW_MASK	0x0f00
+
+#define REG_NORMAL_SIE_PHY		0xfe68	/* 0xfe68 - 0xfe6b */
+#define REG_NORMAL_SIE_OPTIONAL2	0xfe6c
+#define REG_NORMAL_SIE_GPS_EP		0xfe6d	/* RTL8723 only */
+#define REG_NORMAL_SIE_MAC_ADDR		0xfe70	/* 0xfe70 - 0xfe75 */
+#define REG_NORMAL_SIE_STRING		0xfe80	/* 0xfe80 - 0xfedf */
+
+/* RF6052 registers */
+#define RF6052_REG_AC			0x00
+#define RF6052_REG_IQADJ_G1		0x01
+#define RF6052_REG_IQADJ_G2		0x02
+#define RF6052_REG_BS_PA_APSET_G1_G4	0x03
+#define RF6052_REG_BS_PA_APSET_G5_G8	0x04
+#define RF6052_REG_POW_TRSW		0x05
+#define RF6052_REG_GAIN_RX		0x06
+#define RF6052_REG_GAIN_TX		0x07
+#define RF6052_REG_TXM_IDAC		0x08
+#define RF6052_REG_IPA_G		0x09
+#define RF6052_REG_TXBIAS_G		0x0a
+#define RF6052_REG_TXPA_AG		0x0b
+#define RF6052_REG_IPA_A		0x0c
+#define RF6052_REG_TXBIAS_A		0x0d
+#define RF6052_REG_BS_PA_APSET_G9_G11	0x0e
+#define RF6052_REG_BS_IQGEN		0x0f
+#define RF6052_REG_MODE1		0x10
+#define RF6052_REG_MODE2		0x11
+#define RF6052_REG_RX_AGC_HP		0x12
+#define RF6052_REG_TX_AGC		0x13
+#define RF6052_REG_BIAS			0x14
+#define RF6052_REG_IPA			0x15
+#define RF6052_REG_TXBIAS		0x16
+#define RF6052_REG_POW_ABILITY		0x17
+#define RF6052_REG_MODE_AG		0x18	/* RF channel and BW switch */
+#define  MODE_AG_CHANNEL_MASK		0x3ff
+#define  MODE_AG_CHANNEL_20MHZ		BIT(10)
+
+#define RF6052_REG_TOP			0x19
+#define RF6052_REG_RX_G1		0x1a
+#define RF6052_REG_RX_G2		0x1b
+#define RF6052_REG_RX_BB2		0x1c
+#define RF6052_REG_RX_BB1		0x1d
+#define RF6052_REG_RCK1			0x1e
+#define RF6052_REG_RCK2			0x1f
+#define RF6052_REG_TX_G1		0x20
+#define RF6052_REG_TX_G2		0x21
+#define RF6052_REG_TX_G3		0x22
+#define RF6052_REG_TX_BB1		0x23
+#define RF6052_REG_T_METER		0x24
+#define RF6052_REG_SYN_G1		0x25	/* RF TX Power control */
+#define RF6052_REG_SYN_G2		0x26	/* RF TX Power control */
+#define RF6052_REG_SYN_G3		0x27	/* RF TX Power control */
+#define RF6052_REG_SYN_G4		0x28	/* RF TX Power control */
+#define RF6052_REG_SYN_G5		0x29	/* RF TX Power control */
+#define RF6052_REG_SYN_G6		0x2a	/* RF TX Power control */
+#define RF6052_REG_SYN_G7		0x2b	/* RF TX Power control */
+#define RF6052_REG_SYN_G8		0x2c	/* RF TX Power control */
+
+#define RF6052_REG_RCK_OS		0x30	/* RF TX PA control */
+
+#define RF6052_REG_TXPA_G1		0x31	/* RF TX PA control */
+#define RF6052_REG_TXPA_G2		0x32	/* RF TX PA control */
+#define RF6052_REG_TXPA_G3		0x33	/* RF TX PA control */
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/realtek/rtlwifi/Kconfig
similarity index 100%
rename from drivers/net/wireless/rtlwifi/Kconfig
rename to drivers/net/wireless/realtek/rtlwifi/Kconfig
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/Makefile
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/base.c
rename to drivers/net/wireless/realtek/rtlwifi/base.c
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/base.h
rename to drivers/net/wireless/realtek/rtlwifi/base.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/cam.c
rename to drivers/net/wireless/realtek/rtlwifi/cam.c
diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/realtek/rtlwifi/cam.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/cam.h
rename to drivers/net/wireless/realtek/rtlwifi/cam.h
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
similarity index 99%
rename from drivers/net/wireless/rtlwifi/core.c
rename to drivers/net/wireless/realtek/rtlwifi/core.c
index 585d088..c925a4d 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1373,7 +1373,7 @@
 			       struct ieee80211_vif *vif,
 			       enum ieee80211_ampdu_mlme_action action,
 			       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			       u8 buf_size)
+			       u8 buf_size, bool amsdu)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/realtek/rtlwifi/core.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/core.h
rename to drivers/net/wireless/realtek/rtlwifi/core.h
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/debug.c
rename to drivers/net/wireless/realtek/rtlwifi/debug.c
diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/debug.h
rename to drivers/net/wireless/realtek/rtlwifi/debug.h
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/efuse.c
rename to drivers/net/wireless/realtek/rtlwifi/efuse.c
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/efuse.h
rename to drivers/net/wireless/realtek/rtlwifi/efuse.h
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/pci.c
rename to drivers/net/wireless/realtek/rtlwifi/pci.c
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/pci.h
rename to drivers/net/wireless/realtek/rtlwifi/pci.h
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/ps.c
rename to drivers/net/wireless/realtek/rtlwifi/ps.c
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/realtek/rtlwifi/ps.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/ps.h
rename to drivers/net/wireless/realtek/rtlwifi/ps.h
diff --git a/drivers/net/wireless/rtlwifi/pwrseqcmd.h b/drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/pwrseqcmd.h
rename to drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rc.c
rename to drivers/net/wireless/realtek/rtlwifi/rc.c
diff --git a/drivers/net/wireless/rtlwifi/rc.h b/drivers/net/wireless/realtek/rtlwifi/rc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rc.h
rename to drivers/net/wireless/realtek/rtlwifi/rc.h
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/regd.c
rename to drivers/net/wireless/realtek/rtlwifi/regd.c
diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/realtek/rtlwifi/regd.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/regd.h
rename to drivers/net/wireless/realtek/rtlwifi/regd.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/main.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
similarity index 99%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 25db369..34ce064 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -1946,6 +1946,14 @@
 		rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
 		mac->rx_data_filter = *(u16 *)val;
 		break;
+	case HW_VAR_KEEP_ALIVE:{
+			u8 array[2];
+			array[0] = 0xff;
+			array[1] = *((u8 *)val);
+			rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2,
+					    array);
+			break;
+		}
 	default:
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 			 "switch case not processed\n");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/btc.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/main.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
diff --git a/drivers/net/wireless/rtlwifi/stats.c b/drivers/net/wireless/realtek/rtlwifi/stats.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/stats.c
rename to drivers/net/wireless/realtek/rtlwifi/stats.c
diff --git a/drivers/net/wireless/rtlwifi/stats.h b/drivers/net/wireless/realtek/rtlwifi/stats.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/stats.h
rename to drivers/net/wireless/realtek/rtlwifi/stats.h
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/usb.c
rename to drivers/net/wireless/realtek/rtlwifi/usb.c
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/usb.h
rename to drivers/net/wireless/realtek/rtlwifi/usb.h
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/wifi.h
rename to drivers/net/wireless/realtek/rtlwifi/wifi.h
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 71a825c..a13d1f2 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1236,7 +1236,7 @@
 
 	netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
 
-	if (rts_threshold < 0 || rts_threshold > 2347)
+	if (rts_threshold == -1 || rts_threshold > 2347)
 		rts_threshold = 2347;
 
 	tmp = cpu_to_le32(rts_threshold);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 7e804324..b5bcc93 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -664,6 +664,7 @@
  * @tid: Traffic identifier.
  * @ssn: Pointer to ssn value.
  * @buf_size: Buffer size (for kernel version > 2.6.38).
+ * @amsdu: whether A-MSDU aggregation within the A-MPDU session is allowed
  *
  * Return: status: 0 on success, negative error code on failure.
  */
@@ -673,7 +674,8 @@
 				     struct ieee80211_sta *sta,
 				     unsigned short tid,
 				     unsigned short *ssn,
-				     unsigned char buf_size)
+				     unsigned char buf_size,
+				     bool amsdu)
 {
 	int status = -EOPNOTSUPP;
 	struct rsi_hw *adapter = hw->priv;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 9524564..9733b31 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -7937,7 +7937,7 @@
 int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			enum ieee80211_ampdu_mlme_action action,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			u8 buf_size)
+			u8 buf_size, bool amsdu)
 {
 	struct rt2x00_sta *sta_priv = (struct rt2x00_sta *)sta->drv_priv;
 	int ret = 0;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 1609b8a..440790b 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -220,7 +220,7 @@
 int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			enum ieee80211_ampdu_mlme_action action,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			u8 buf_size);
+			u8 buf_size, bool amsdu);
 int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
 		      struct survey_info *survey);
 void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index e819369..ec7f6af 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5263,7 +5263,7 @@
 				  struct ieee80211_vif *vif,
 				  enum ieee80211_ampdu_mlme_action action,
 				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-				  u8 buf_size)
+				  u8 buf_size, bool amsdu)
 {
 	struct wl1271 *wl = hw->priv;
 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f821a97..9bf63c2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1819,19 +1819,22 @@
 		goto destroy_ring;
 	}
 
+	if (xenbus_exists(XBT_NIL,
+			  info->xbdev->otherend, "multi-queue-max-queues")) {
+		/* Write the number of queues */
+		err = xenbus_printf(xbt, dev->nodename,
+				    "multi-queue-num-queues", "%u", num_queues);
+		if (err) {
+			message = "writing multi-queue-num-queues";
+			goto abort_transaction_no_dev_fatal;
+		}
+	}
+
 	if (num_queues == 1) {
 		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
 		if (err)
 			goto abort_transaction_no_dev_fatal;
 	} else {
-		/* Write the number of queues */
-		err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
-				    "%u", num_queues);
-		if (err) {
-			message = "writing multi-queue-num-queues";
-			goto abort_transaction_no_dev_fatal;
-		}
-
 		/* Write the keys for each queue */
 		for (i = 0; i < num_queues; ++i) {
 			queue = &info->queues[i];
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 87751cf..865a3e3 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -190,14 +190,17 @@
 	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
 	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
 	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
 		return 1;
 	}
 	return 0;
@@ -237,7 +240,7 @@
 
 static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
 {
-	if (idx < 0 || idx > ndev->mw_count)
+	if (idx < 0 || idx >= ndev->mw_count)
 		return -EINVAL;
 	return ndev->reg->mw_bar[idx];
 }
@@ -572,10 +575,13 @@
 			 "Connection Topology -\t%s\n",
 			 ntb_topo_string(ndev->ntb.topo));
 
-	off += scnprintf(buf + off, buf_size - off,
-			 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
-	off += scnprintf(buf + off, buf_size - off,
-			 "B2B MW Idx -\t\t%d\n", ndev->b2b_idx);
+	if (ndev->b2b_idx != UINT_MAX) {
+		off += scnprintf(buf + off, buf_size - off,
+				 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
+		off += scnprintf(buf + off, buf_size - off,
+				 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
+	}
+
 	off += scnprintf(buf + off, buf_size - off,
 			 "BAR4 Split -\t\t%s\n",
 			 ndev->bar4_split ? "yes" : "no");
@@ -1484,7 +1490,7 @@
 	pdev = ndev_pdev(ndev);
 	mmio = ndev->self_mmio;
 
-	if (ndev->b2b_idx >= ndev->mw_count) {
+	if (ndev->b2b_idx == UINT_MAX) {
 		dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
 		b2b_bar = 0;
 		ndev->b2b_off = 0;
@@ -1776,6 +1782,13 @@
 			else
 				ndev->b2b_idx = b2b_mw_idx;
 
+			if (ndev->b2b_idx >= ndev->mw_count) {
+				dev_dbg(ndev_dev(ndev),
+					"b2b_mw_idx %d invalid for mw_count %u\n",
+					b2b_mw_idx, ndev->mw_count);
+				return -EINVAL;
+			}
+
 			dev_dbg(ndev_dev(ndev),
 				"setting up b2b mw idx %d means %d\n",
 				b2b_mw_idx, ndev->b2b_idx);
@@ -1843,6 +1856,9 @@
 	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
 		ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
 		break;
 	}
@@ -1857,6 +1873,9 @@
 	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
 		ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
 		break;
 	}
@@ -1878,6 +1897,9 @@
 	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
 		ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
 		break;
 	}
@@ -1996,7 +2018,7 @@
 	ndev->ntb.ops = &intel_ntb_ops;
 
 	ndev->b2b_off = 0;
-	ndev->b2b_idx = INT_MAX;
+	ndev->b2b_idx = UINT_MAX;
 
 	ndev->bar4_split = 0;
 
@@ -2234,14 +2256,17 @@
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
 	{0}
 };
 MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index 7ddaf38..ea0612f7 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -67,6 +67,9 @@
 #define PCI_DEVICE_ID_INTEL_NTB_PS_HSX	0x2F0E
 #define PCI_DEVICE_ID_INTEL_NTB_SS_HSX	0x2F0F
 #define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD	0x0C4E
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_BDX	0x6F0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_BDX	0x6F0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX	0x6F0F
 
 /* Intel Xeon hardware */
 
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 1c6386d..6e3ee90 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -119,7 +119,8 @@
 	struct ntb_transport_ctx *transport;
 	struct ntb_dev *ndev;
 	void *cb_data;
-	struct dma_chan *dma_chan;
+	struct dma_chan *tx_dma_chan;
+	struct dma_chan *rx_dma_chan;
 
 	bool client_ready;
 	bool link_is_up;
@@ -297,7 +298,7 @@
 
 static int ntb_bus_init(struct ntb_transport_ctx *nt)
 {
-	list_add(&nt->entry, &ntb_transport_list);
+	list_add_tail(&nt->entry, &ntb_transport_list);
 	return 0;
 }
 
@@ -452,7 +453,7 @@
 
 	out_offset = 0;
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "NTB QP stats\n");
+			       "\nNTB QP stats:\n\n");
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "rx_bytes - \t%llu\n", qp->rx_bytes);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -470,11 +471,11 @@
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "rx_buff - \t%p\n", qp->rx_buff);
+			       "rx_buff - \t0x%p\n", qp->rx_buff);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "rx_index - \t%u\n", qp->rx_index);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "rx_max_entry - \t%u\n", qp->rx_max_entry);
+			       "rx_max_entry - \t%u\n\n", qp->rx_max_entry);
 
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "tx_bytes - \t%llu\n", qp->tx_bytes);
@@ -489,15 +490,32 @@
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "tx_mw - \t%p\n", qp->tx_mw);
+			       "tx_mw - \t0x%p\n", qp->tx_mw);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "tx_index - \t%u\n", qp->tx_index);
+			       "tx_index (H) - \t%u\n", qp->tx_index);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "RRI (T) - \t%u\n",
+			       qp->remote_rx_info->entry);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "free tx - \t%u\n",
+			       ntb_transport_tx_free_entry(qp));
 
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "\nQP Link %s\n",
+			       "\n");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Using TX DMA - \t%s\n",
+			       qp->tx_dma_chan ? "Yes" : "No");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Using RX DMA - \t%s\n",
+			       qp->rx_dma_chan ? "Yes" : "No");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "QP Link - \t%s\n",
 			       qp->link_is_up ? "Up" : "Down");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "\n");
+
 	if (out_offset > out_count)
 		out_offset = out_count;
 
@@ -535,6 +553,7 @@
 	}
 	entry = list_first_entry(list, struct ntb_queue_entry, entry);
 	list_del(&entry->entry);
+
 out:
 	spin_unlock_irqrestore(lock, flags);
 
@@ -1206,7 +1225,7 @@
 {
 	struct dma_async_tx_descriptor *txd;
 	struct ntb_transport_qp *qp = entry->qp;
-	struct dma_chan *chan = qp->dma_chan;
+	struct dma_chan *chan = qp->rx_dma_chan;
 	struct dma_device *device;
 	size_t pay_off, buff_off, len;
 	struct dmaengine_unmap_data *unmap;
@@ -1219,18 +1238,18 @@
 		goto err;
 
 	if (len < copy_bytes)
-		goto err_wait;
+		goto err;
 
 	device = chan->device;
 	pay_off = (size_t)offset & ~PAGE_MASK;
 	buff_off = (size_t)buf & ~PAGE_MASK;
 
 	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
-		goto err_wait;
+		goto err;
 
 	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 	if (!unmap)
-		goto err_wait;
+		goto err;
 
 	unmap->len = len;
 	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
@@ -1273,12 +1292,6 @@
 	dmaengine_unmap_put(unmap);
 err_get_unmap:
 	dmaengine_unmap_put(unmap);
-err_wait:
-	/* If the callbacks come out of order, the writing of the index to the
-	 * last completed will be out of order.  This may result in the
-	 * receive stalling forever.
-	 */
-	dma_sync_wait(chan, qp->last_cookie);
 err:
 	ntb_memcpy_rx(entry, offset);
 	qp->rx_memcpy++;
@@ -1373,8 +1386,8 @@
 			break;
 	}
 
-	if (i && qp->dma_chan)
-		dma_async_issue_pending(qp->dma_chan);
+	if (i && qp->rx_dma_chan)
+		dma_async_issue_pending(qp->rx_dma_chan);
 
 	if (i == qp->rx_max_entry) {
 		/* there is more work to do */
@@ -1441,7 +1454,7 @@
 {
 	struct ntb_payload_header __iomem *hdr;
 	struct dma_async_tx_descriptor *txd;
-	struct dma_chan *chan = qp->dma_chan;
+	struct dma_chan *chan = qp->tx_dma_chan;
 	struct dma_device *device;
 	size_t dest_off, buff_off;
 	struct dmaengine_unmap_data *unmap;
@@ -1634,14 +1647,27 @@
 	dma_cap_set(DMA_MEMCPY, dma_mask);
 
 	if (use_dma) {
-		qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
-						   (void *)(unsigned long)node);
-		if (!qp->dma_chan)
-			dev_info(&pdev->dev, "Unable to allocate DMA channel\n");
+		qp->tx_dma_chan =
+			dma_request_channel(dma_mask, ntb_dma_filter_fn,
+					    (void *)(unsigned long)node);
+		if (!qp->tx_dma_chan)
+			dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");
+
+		qp->rx_dma_chan =
+			dma_request_channel(dma_mask, ntb_dma_filter_fn,
+					    (void *)(unsigned long)node);
+		if (!qp->rx_dma_chan)
+			dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
 	} else {
-		qp->dma_chan = NULL;
+		qp->tx_dma_chan = NULL;
+		qp->rx_dma_chan = NULL;
 	}
-	dev_dbg(&pdev->dev, "Using %s memcpy\n", qp->dma_chan ? "DMA" : "CPU");
+
+	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
+		qp->tx_dma_chan ? "DMA" : "CPU");
+
+	dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
+		qp->rx_dma_chan ? "DMA" : "CPU");
 
 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
 		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
@@ -1676,8 +1702,10 @@
 err1:
 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
-	if (qp->dma_chan)
-		dma_release_channel(qp->dma_chan);
+	if (qp->tx_dma_chan)
+		dma_release_channel(qp->tx_dma_chan);
+	if (qp->rx_dma_chan)
+		dma_release_channel(qp->rx_dma_chan);
 	nt->qp_bitmap_free |= qp_bit;
 err:
 	return NULL;
@@ -1701,12 +1729,27 @@
 
 	pdev = qp->ndev->pdev;
 
-	if (qp->dma_chan) {
-		struct dma_chan *chan = qp->dma_chan;
+	if (qp->tx_dma_chan) {
+		struct dma_chan *chan = qp->tx_dma_chan;
 		/* Putting the dma_chan to NULL will force any new traffic to be
 		 * processed by the CPU instead of the DAM engine
 		 */
-		qp->dma_chan = NULL;
+		qp->tx_dma_chan = NULL;
+
+		/* Try to be nice and wait for any queued DMA engine
+		 * transactions to process before smashing it with a rock
+		 */
+		dma_sync_wait(chan, qp->last_cookie);
+		dmaengine_terminate_all(chan);
+		dma_release_channel(chan);
+	}
+
+	if (qp->rx_dma_chan) {
+		struct dma_chan *chan = qp->rx_dma_chan;
+		/* Putting the dma_chan to NULL will force any new traffic to be
+		 * processed by the CPU instead of the DMA engine
+		 */
+		qp->rx_dma_chan = NULL;
 
 		/* Try to be nice and wait for any queued DMA engine
 		 * transactions to process before smashing it with a rock
@@ -1843,7 +1886,7 @@
 	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
 	if (!entry) {
 		qp->tx_err_no_buf++;
-		return -ENOMEM;
+		return -EBUSY;
 	}
 
 	entry->cb_data = cb;
@@ -1954,21 +1997,34 @@
 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
 {
 	unsigned int max;
+	unsigned int copy_align;
 
 	if (!qp)
 		return 0;
 
-	if (!qp->dma_chan)
+	if (!qp->tx_dma_chan && !qp->rx_dma_chan)
 		return qp->tx_max_frame - sizeof(struct ntb_payload_header);
 
+	copy_align = max(qp->tx_dma_chan->device->copy_align,
+			 qp->rx_dma_chan->device->copy_align);
+
 	/* If DMA engine usage is possible, try to find the max size for that */
 	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
-	max -= max % (1 << qp->dma_chan->device->copy_align);
+	max -= max % (1 << copy_align);
 
 	return max;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
 
+unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
+{
+	unsigned int head = qp->tx_index;
+	unsigned int tail = qp->remote_rx_info->entry;
+
+	return tail > head ? tail - head : qp->tx_max_entry + tail - head;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
+
 static void ntb_transport_doorbell_callback(void *data, int vector)
 {
 	struct ntb_transport_ctx *nt = data;
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 59ad54a6..cb47751 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -128,13 +128,13 @@
 	struct nd_btt *nd_btt = to_nd_btt(dev);
 	ssize_t rc;
 
-	nvdimm_bus_lock(dev);
 	device_lock(dev);
+	nvdimm_bus_lock(dev);
 	rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
 	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
 			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
-	device_unlock(dev);
 	nvdimm_bus_unlock(dev);
+	device_unlock(dev);
 
 	return rc;
 }
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 3fd7d0d..71805a1 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -148,13 +148,13 @@
 	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
 	ssize_t rc;
 
-	nvdimm_bus_lock(dev);
 	device_lock(dev);
+	nvdimm_bus_lock(dev);
 	rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
 	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
 			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
-	device_unlock(dev);
 	nvdimm_bus_unlock(dev);
+	device_unlock(dev);
 
 	return rc;
 }
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index b952538..0ba6a97 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -92,6 +92,8 @@
 	struct pmem_device *pmem = bdev->bd_disk->private_data;
 
 	pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
+	if (rw & WRITE)
+		wmb_pmem();
 	page_endio(page, rw & WRITE, 0);
 
 	return 0;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 1350fa2..a87a868 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -197,7 +197,8 @@
  * of_phy_find_device - Give a PHY node, find the phy_device
  * @phy_np: Pointer to the phy's device tree node
  *
- * Returns a pointer to the phy_device.
+ * If successful, returns a pointer to the phy_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure.
  */
 struct phy_device *of_phy_find_device(struct device_node *phy_np)
 {
@@ -217,7 +218,9 @@
  * @hndlr: Link state callback for the network device
  * @iface: PHY data interface type
  *
- * Returns a pointer to the phy_device if successful.  NULL otherwise
+ * If successful, returns a pointer to the phy_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure. The
+ * refcount must be dropped by calling phy_disconnect() or phy_detach().
  */
 struct phy_device *of_phy_connect(struct net_device *dev,
 				  struct device_node *phy_np,
@@ -225,13 +228,19 @@
 				  phy_interface_t iface)
 {
 	struct phy_device *phy = of_phy_find_device(phy_np);
+	int ret;
 
 	if (!phy)
 		return NULL;
 
 	phy->dev_flags = flags;
 
-	return phy_connect_direct(dev, phy, hndlr, iface) ? NULL : phy;
+	ret = phy_connect_direct(dev, phy, hndlr, iface);
+
+	/* refcount is held by phy_connect_direct() on success */
+	put_device(&phy->dev);
+
+	return ret ? NULL : phy;
 }
 EXPORT_SYMBOL(of_phy_connect);
 
@@ -241,17 +250,27 @@
  * @phy_np: Node pointer for the PHY
  * @flags: flags to pass to the PHY
  * @iface: PHY data interface type
+ *
+ * If successful, returns a pointer to the phy_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure. The
+ * refcount must be dropped by calling phy_disconnect() or phy_detach().
  */
 struct phy_device *of_phy_attach(struct net_device *dev,
 				 struct device_node *phy_np, u32 flags,
 				 phy_interface_t iface)
 {
 	struct phy_device *phy = of_phy_find_device(phy_np);
+	int ret;
 
 	if (!phy)
 		return NULL;
 
-	return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy;
+	ret = phy_attach_direct(dev, phy, flags, iface);
+
+	/* refcount is held by phy_attach_direct() on success */
+	put_device(&phy->dev);
+
+	return ret ? NULL : phy;
 }
 EXPORT_SYMBOL(of_phy_attach);
 
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 1710d9d..2306313 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -38,8 +38,8 @@
 	 */
 	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
 	if (rc != 0)
-		return rc;
-	/* No pin, exit */
+		goto err;
+	/* No pin, exit with no error message. */
 	if (pin == 0)
 		return -ENODEV;
 
@@ -53,8 +53,10 @@
 			ppnode = pci_bus_to_OF_node(pdev->bus);
 
 			/* No node for host bridge ? give up */
-			if (ppnode == NULL)
-				return -EINVAL;
+			if (ppnode == NULL) {
+				rc = -EINVAL;
+				goto err;
+			}
 		} else {
 			/* We found a P2P bridge, check if it has a node */
 			ppnode = pci_device_to_OF_node(ppdev);
@@ -86,7 +88,13 @@
 	out_irq->args[0] = pin;
 	laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
 	laddr[1] = laddr[2] = cpu_to_be32(0);
-	return of_irq_parse_raw(laddr, out_irq);
+	rc = of_irq_parse_raw(laddr, out_irq);
+	if (rc)
+		goto err;
+	return 0;
+err:
+	dev_err(&pdev->dev, "of_irq_parse_pci() failed with rc=%d\n", rc);
+	return rc;
 }
 EXPORT_SYMBOL_GPL(of_irq_parse_pci);
 
@@ -105,10 +113,8 @@
 	int ret;
 
 	ret = of_irq_parse_pci(dev, &oirq);
-	if (ret) {
-		dev_err(&dev->dev, "of_irq_parse_pci() failed with rc=%d\n", ret);
+	if (ret)
 		return 0; /* Proper return code 0 == NO_IRQ */
-	}
 
 	return irq_create_of_mapping(&oirq);
 }
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index baec33c..a0580af 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -560,6 +560,9 @@
 	} else if (bus->parent) {
 		int i;
 
+		pci_read_bridge_bases(bus);
+
+
 		for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
 			if((bus->self->resource[i].flags & 
 			    (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 7b9e89b..a32c1f6 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -693,6 +693,7 @@
 	if (bus->parent) {
 		int i;
 		/* PCI-PCI Bridge */
+		pci_read_bridge_bases(bus);
 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
 			pci_claim_bridge_resource(bus->self, i);
 	} else {
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 769f7e3..59ac36f 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -442,7 +442,8 @@
 static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
 			       void *arg)
 {
-	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+	struct pci_dev *tdev = pci_get_slot(dev->bus,
+					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
 	ssize_t ret;
 
 	if (!tdev)
@@ -456,7 +457,8 @@
 static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
 				const void *arg)
 {
-	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+	struct pci_dev *tdev = pci_get_slot(dev->bus,
+					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
 	ssize_t ret;
 
 	if (!tdev)
@@ -473,22 +475,6 @@
 	.release = pci_vpd_pci22_release,
 };
 
-static int pci_vpd_f0_dev_check(struct pci_dev *dev)
-{
-	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
-	int ret = 0;
-
-	if (!tdev)
-		return -ENODEV;
-	if (!tdev->vpd || !tdev->multifunction ||
-	    dev->class != tdev->class || dev->vendor != tdev->vendor ||
-	    dev->device != tdev->device)
-		ret = -ENODEV;
-
-	pci_dev_put(tdev);
-	return ret;
-}
-
 int pci_vpd_pci22_init(struct pci_dev *dev)
 {
 	struct pci_vpd_pci22 *vpd;
@@ -497,12 +483,7 @@
 	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
 	if (!cap)
 		return -ENODEV;
-	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
-		int ret = pci_vpd_f0_dev_check(dev);
 
-		if (ret)
-			return ret;
-	}
 	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
 	if (!vpd)
 		return -ENOMEM;
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 6fbd3f2..d3346d2 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -256,6 +256,8 @@
 
 		res->start = start;
 		res->end = end;
+		res->flags &= ~IORESOURCE_UNSET;
+		orig_res.flags &= ~IORESOURCE_UNSET;
 		dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
 				 &orig_res, res);
 
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 81253e7..0aa81bd 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -110,7 +110,7 @@
 	return -EINVAL;
 }
 
-static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
@@ -138,8 +138,7 @@
  * Traverse through pending legacy interrupts and invoke handler for each. Also
  * takes care of interrupt controller level mask/ack operation.
  */
-static void ks_pcie_legacy_irq_handler(unsigned int __irq,
-				       struct irq_desc *desc)
+static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 367e28f..c4f64bf 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -362,6 +362,7 @@
 static struct of_device_id rcar_pci_of_match[] = {
 	{ .compatible = "renesas,pci-r8a7790", },
 	{ .compatible = "renesas,pci-r8a7791", },
+	{ .compatible = "renesas,pci-r8a7794", },
 	{ },
 };
 
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index 996327c..e491681 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -295,7 +295,7 @@
 	return 0;
 }
 
-static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc)
+static void xgene_msi_isr(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct xgene_msi_group *msi_groups;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index dd652f2..108a311 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -299,9 +299,10 @@
 	 * Unbound PCI devices are always put in D0, regardless of
 	 * runtime PM status.  During probe, the device is set to
 	 * active and the usage count is incremented.  If the driver
-	 * supports runtime PM, it should call pm_runtime_put_noidle()
-	 * in its probe routine and pm_runtime_get_noresume() in its
-	 * remove routine.
+	 * supports runtime PM, it should call pm_runtime_put_noidle(),
+	 * or any other runtime PM helper function decrementing the usage
+	 * count, in its probe routine and pm_runtime_get_noresume() in
+	 * its remove routine.
 	 */
 	pm_runtime_get_sync(dev);
 	pci_dev->driver = pci_drv;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 0b2be17..8361d27 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -676,15 +676,20 @@
 static void pci_set_bus_msi_domain(struct pci_bus *bus)
 {
 	struct irq_domain *d;
+	struct pci_bus *b;
 
 	/*
-	 * Either bus is the root, and we must obtain it from the
-	 * firmware, or we inherit it from the bridge device.
+	 * The bus can be a root bus, a subordinate bus, or a virtual bus
+	 * created by an SR-IOV device.  Walk up to the first bridge device
+	 * found or derive the domain from the host bridge.
 	 */
-	if (pci_is_root_bus(bus))
-		d = pci_host_bridge_msi_domain(bus);
-	else
-		d = dev_get_msi_domain(&bus->self->dev);
+	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
+		if (b->self)
+			d = dev_get_msi_domain(&b->self->dev);
+	}
+
+	if (!d)
+		d = pci_host_bridge_msi_domain(b);
 
 	dev_set_msi_domain(&bus->dev, d);
 }
@@ -855,9 +860,6 @@
 			child->bridge_ctl = bctl;
 		}
 
-		/* Read and initialize bridge resources */
-		pci_read_bridge_bases(child);
-
 		cmax = pci_scan_child_bus(child);
 		if (cmax > subordinate)
 			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
@@ -918,9 +920,6 @@
 
 		if (!is_cardbus) {
 			child->bridge_ctl = bctl;
-
-			/* Read and initialize bridge resources */
-			pci_read_bridge_bases(child);
 			max = pci_scan_child_bus(child);
 		} else {
 			/*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6a30252..b03373f 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1907,11 +1907,27 @@
 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
 			 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
 
+/*
+ * Quirk non-zero PCI functions to route VPD access through function 0 for
+ * devices that share VPD resources between functions.  The functions are
+ * expected to be identical devices.
+ */
 static void quirk_f0_vpd_link(struct pci_dev *dev)
 {
-	if (!dev->multifunction || !PCI_FUNC(dev->devfn))
+	struct pci_dev *f0;
+
+	if (!PCI_FUNC(dev->devfn))
 		return;
-	dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+
+	f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+	if (!f0)
+		return;
+
+	if (f0->vpd && dev->class == f0->class &&
+	    dev->vendor == f0->vendor && dev->device == f0->device)
+		dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+
+	pci_dev_put(f0);
 }
 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
 			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
index 7d9482b..1ca7830 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
@@ -143,7 +143,7 @@
 	return !!(readl(chip->base + offset) & BIT(shift));
 }
 
-static void cygnus_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void cygnus_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct cygnus_gpio *chip = to_cygnus_gpio(gc);
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 69723e0..9638a00 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -349,6 +349,9 @@
 	struct pinctrl_gpio_range *range = NULL;
 	struct gpio_chip *chip = gpio_to_chip(gpio);
 
+	if (WARN(!chip, "no gpio_chip for gpio%i?", gpio))
+		return false;
+
 	mutex_lock(&pinctrldev_list_mutex);
 
 	/* Loop over the pin controllers */
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index dac4865..f79ea43 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -425,7 +425,7 @@
 	}
 }
 
-static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void byt_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc));
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 2d5d3dd..270c127 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1414,7 +1414,7 @@
 	.flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
-static void chv_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void chv_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index bb377c1..54848b8 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -836,7 +836,7 @@
 	}
 }
 
-static void intel_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void intel_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 7726c6c..1b22f96 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1190,7 +1190,7 @@
 	}
 }
 
-static void mtk_eint_irq_handler(unsigned irq, struct irq_desc *desc)
+static void mtk_eint_irq_handler(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 352ede1..96cf039 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -860,7 +860,7 @@
 	chained_irq_exit(host_chip, desc);
 }
 
-static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void nmk_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
 	struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
@@ -873,7 +873,7 @@
 	__nmk_gpio_irq_handler(desc, status);
 }
 
-static void nmk_gpio_latent_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void nmk_gpio_latent_irq_handler(struct irq_desc *desc)
 {
 	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
 	struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index a5976eb..f6be685 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -530,8 +530,7 @@
 static inline void preflow_handler(struct irq_desc *desc) { }
 #endif
 
-static void adi_gpio_handle_pint_irq(unsigned int inta_irq,
-			struct irq_desc *desc)
+static void adi_gpio_handle_pint_irq(struct irq_desc *desc)
 {
 	u32 request;
 	u32 level_mask, hwirq;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 5e86bb8..3318f1d 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -492,15 +492,15 @@
 	.irq_set_type = amd_gpio_irq_set_type,
 };
 
-static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void amd_gpio_irq_handler(struct irq_desc *desc)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
 	u32 i;
 	u32 off;
 	u32 reg;
 	u32 pin_reg;
 	u64 reg64;
 	int handled = 0;
+	unsigned int irq;
 	unsigned long flags;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -541,7 +541,7 @@
 	}
 
 	if (handled == 0)
-		handle_bad_irq(irq, desc);
+		handle_bad_irq(desc);
 
 	spin_lock_irqsave(&gpio_dev->lock, flags);
 	reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index bae0012..b0fde0f 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1585,7 +1585,7 @@
 	.irq_set_wake	= gpio_irq_set_wake,
 };
 
-static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void gpio_irq_handler(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct gpio_chip *gpio_chip = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 3731cc6..9c9b889 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -519,7 +519,7 @@
 	.irq_set_type		= u300_gpio_irq_type,
 };
 
-static void u300_gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
+static void u300_gpio_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 	struct irq_chip *parent_chip = irq_desc_get_chip(desc);
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index 461fffc..11f8b83 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -337,9 +337,9 @@
 	pmap->dev = &pdev->dev;
 
 	pmap->pctl = pinctrl_register(pctl_desc, &pdev->dev, pmap);
-	if (!pmap->pctl) {
+	if (IS_ERR(pmap->pctl)) {
 		dev_err(&pdev->dev, "pinctrl driver registration failed\n");
-		return -EINVAL;
+		return PTR_ERR(pmap->pctl);
 	}
 
 	ret = dc_gpiochip_add(pmap, pdev->dev.of_node);
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 3dc2ae1..952b1c6 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1303,20 +1303,18 @@
 	}
 
 	if (type & IRQ_TYPE_LEVEL_MASK)
-		__irq_set_handler_locked(data->irq, handle_level_irq);
+		irq_set_handler_locked(data, handle_level_irq);
 	else
-		__irq_set_handler_locked(data->irq, handle_edge_irq);
+		irq_set_handler_locked(data, handle_edge_irq);
 
 	return 0;
 }
 
-static void pistachio_gpio_irq_handler(unsigned int __irq,
-				       struct irq_desc *desc)
+static void pistachio_gpio_irq_handler(struct irq_desc *desc)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct pistachio_gpio_bank *bank = gc_to_bank(gc);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned long pending;
 	unsigned int pin;
 
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index c5246c0..88bb707 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -1475,7 +1475,7 @@
  * Interrupt handling
  */
 
-static void rockchip_irq_demux(unsigned int __irq, struct irq_desc *desc)
+static void rockchip_irq_demux(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index bf548c2..ef04b96 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1679,7 +1679,7 @@
  * Use this if you have a separate interrupt for each
  * pinctrl-single instance.
  */
-static void pcs_irq_chain_handler(unsigned int irq, struct irq_desc *desc)
+static void pcs_irq_chain_handler(struct irq_desc *desc)
 {
 	struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip;
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index f8338d2..389526e 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1460,7 +1460,7 @@
 	}
 }
 
-static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void st_gpio_irq_handler(struct irq_desc *desc)
 {
 	/* interrupt dedicated per bank */
 	struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1472,7 +1472,7 @@
 	chained_irq_exit(chip, desc);
 }
 
-static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
+static void st_gpio_irqmux_handler(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct st_pinctrl *info = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 67e08cb..29984b3 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -313,8 +313,7 @@
 
 	/* See if this pctldev has this function */
 	while (selector < nfuncs) {
-		const char *fname = ops->get_function_name(pctldev,
-							   selector);
+		const char *fname = ops->get_function_name(pctldev, selector);
 
 		if (!strcmp(function, fname))
 			return selector;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 492cdd5..a0c7407 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -765,9 +765,8 @@
 	.irq_set_wake   = msm_gpio_irq_set_wake,
 };
 
-static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void msm_gpio_irq_handler(struct irq_desc *desc)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	const struct msm_pingroup *g;
 	struct msm_pinctrl *pctrl = to_msm_pinctrl(gc);
@@ -795,7 +794,7 @@
 
 	/* No interrupts were flagged */
 	if (handled == 0)
-		handle_bad_irq(irq, desc);
+		handle_bad_irq(desc);
 
 	chained_irq_exit(chip, desc);
 }
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index c978b31..e1a3721 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -723,9 +723,9 @@
 #endif
 
 	pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
-	if (!pctrl->pctrl) {
+	if (IS_ERR(pctrl->pctrl)) {
 		dev_err(&pdev->dev, "couldn't register pm8xxx gpio driver\n");
-		return -ENODEV;
+		return PTR_ERR(pctrl->pctrl);
 	}
 
 	pctrl->chip = pm8xxx_gpio_template;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 2d1b69f..6652b8d 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -814,9 +814,9 @@
 #endif
 
 	pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
-	if (!pctrl->pctrl) {
+	if (IS_ERR(pctrl->pctrl)) {
 		dev_err(&pdev->dev, "couldn't register pm8xxx mpp driver\n");
-		return -ENODEV;
+		return PTR_ERR(pctrl->pctrl);
 	}
 
 	pctrl->chip = pm8xxx_mpp_template;
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 5f45caa..71ccf6a 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -419,7 +419,7 @@
 };
 
 /* interrupt handler for wakeup interrupts 0..15 */
-static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
+static void exynos_irq_eint0_15(struct irq_desc *desc)
 {
 	struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc);
 	struct samsung_pin_bank *bank = eintd->bank;
@@ -451,7 +451,7 @@
 }
 
 /* interrupt handler for wakeup interrupt 16 */
-static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
+static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 019844d..3d92f82 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -240,7 +240,7 @@
 	.irq_set_type	= s3c24xx_eint_type,
 };
 
-static void s3c2410_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
+static void s3c2410_demux_eint0_3(struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
@@ -295,7 +295,7 @@
 	.irq_set_type	= s3c24xx_eint_type,
 };
 
-static void s3c2412_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
+static void s3c2412_demux_eint0_3(struct irq_desc *desc)
 {
 	struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
 	struct irq_data *data = irq_desc_get_irq_data(desc);
@@ -361,7 +361,7 @@
 				      u32 offset, u32 range)
 {
 	struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc);
-	struct irq_chip *chip = irq_desc_get_irq_chip(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct samsung_pinctrl_drv_data *d = data->drvdata;
 	unsigned int pend, mask;
 
@@ -388,12 +388,12 @@
 	chained_irq_exit(chip, desc);
 }
 
-static void s3c24xx_demux_eint4_7(unsigned int irq, struct irq_desc *desc)
+static void s3c24xx_demux_eint4_7(struct irq_desc *desc)
 {
 	s3c24xx_demux_eint(desc, 0, 0xf0);
 }
 
-static void s3c24xx_demux_eint8_23(unsigned int irq, struct irq_desc *desc)
+static void s3c24xx_demux_eint8_23(struct irq_desc *desc)
 {
 	s3c24xx_demux_eint(desc, 8, 0xffff00);
 }
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index f5ea40a..43407ab 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -407,7 +407,7 @@
 	.xlate	= irq_domain_xlate_twocell,
 };
 
-static void s3c64xx_eint_gpio_irq(unsigned int irq, struct irq_desc *desc)
+static void s3c64xx_eint_gpio_irq(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc);
@@ -631,22 +631,22 @@
 	chained_irq_exit(chip, desc);
 }
 
-static void s3c64xx_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
+static void s3c64xx_demux_eint0_3(struct irq_desc *desc)
 {
 	s3c64xx_irq_demux_eint(desc, 0xf);
 }
 
-static void s3c64xx_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
+static void s3c64xx_demux_eint4_11(struct irq_desc *desc)
 {
 	s3c64xx_irq_demux_eint(desc, 0xff0);
 }
 
-static void s3c64xx_demux_eint12_19(unsigned int irq, struct irq_desc *desc)
+static void s3c64xx_demux_eint12_19(struct irq_desc *desc)
 {
 	s3c64xx_irq_demux_eint(desc, 0xff000);
 }
 
-static void s3c64xx_demux_eint20_27(unsigned int irq, struct irq_desc *desc)
+static void s3c64xx_demux_eint20_27(struct irq_desc *desc)
 {
 	s3c64xx_irq_demux_eint(desc, 0xff00000);
 }
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 9df0c5f..0d24d9e 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -4489,7 +4489,7 @@
 	.irq_set_type = atlas7_gpio_irq_type,
 };
 
-static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
+static void atlas7_gpio_handle_irq(struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct atlas7_gpio_chip *a7gc = to_atlas7_gpio(gc);
@@ -4512,7 +4512,7 @@
 	if (!status) {
 		pr_warn("%s: gpio [%s] status %#x no interrupt is flaged\n",
 			__func__, gc->label, status);
-		handle_bad_irq(irq, desc);
+		handle_bad_irq(desc);
 		return;
 	}
 
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index f8bd9fb..2a8d697 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -545,7 +545,7 @@
 	.irq_set_type = sirfsoc_gpio_irq_type,
 };
 
-static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
+static void sirfsoc_gpio_handle_irq(struct irq_desc *desc)
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -570,7 +570,7 @@
 		printk(KERN_WARNING
 			"%s: gpio id %d status %#x no interrupt is flagged\n",
 			__func__, bank->id, status);
-		handle_bad_irq(irq, desc);
+		handle_bad_irq(desc);
 		return;
 	}
 
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index ae8f29f..1f0af25 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -356,7 +356,7 @@
 	.irq_set_type	= plgpio_irq_set_type,
 };
 
-static void plgpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void plgpio_irq_handler(struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct plgpio *plgpio = container_of(gc, struct plgpio, chip);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index fb4669c0..38e0c7b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -617,13 +617,11 @@
 	spin_lock_irqsave(&pctl->lock, flags);
 
 	if (type & IRQ_TYPE_LEVEL_MASK)
-		__irq_set_chip_handler_name_locked(d->irq,
-						   &sunxi_pinctrl_level_irq_chip,
-						   handle_fasteoi_irq, NULL);
+		irq_set_chip_handler_name_locked(d, &sunxi_pinctrl_level_irq_chip,
+						 handle_fasteoi_irq, NULL);
 	else
-		__irq_set_chip_handler_name_locked(d->irq,
-						   &sunxi_pinctrl_edge_irq_chip,
-						   handle_edge_irq, NULL);
+		irq_set_chip_handler_name_locked(d, &sunxi_pinctrl_edge_irq_chip,
+						 handle_edge_irq, NULL);
 
 	regval = readl(pctl->membase + reg);
 	regval &= ~(IRQ_CFG_IRQ_MASK << index);
@@ -742,7 +740,7 @@
 	.xlate		= sunxi_pinctrl_irq_of_xlate,
 };
 
-static void sunxi_pinctrl_irq_handler(unsigned __irq, struct irq_desc *desc)
+static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 1ef02da..460fa67 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -346,8 +346,7 @@
  * as late as the polling interval is since we can't do that in the respective
  * accessors of the module parameters.
  */
-static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal,
-			       unsigned long *t)
+static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal, int *t)
 {
 	int temp, err = 0;
 
@@ -453,7 +452,7 @@
 }
 
 static int acerhdf_get_trip_hyst(struct thermal_zone_device *thermal, int trip,
-				 unsigned long *temp)
+				 int *temp)
 {
 	if (trip != 0)
 		return -EINVAL;
@@ -464,7 +463,7 @@
 }
 
 static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip,
-				 unsigned long *temp)
+				 int *temp)
 {
 	if (trip == 0)
 		*temp = fanon;
@@ -477,7 +476,7 @@
 }
 
 static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal,
-				 unsigned long *temperature)
+				 int *temperature)
 {
 	*temperature = ACERHDF_TEMP_CRIT;
 	return 0;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index abdaed3..131fee2 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -128,6 +128,24 @@
 	},
 	{
 		.callback = dmi_matched,
+		.ident = "ASUSTeK COMPUTER INC. X456UA",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"),
+		},
+		.driver_data = &quirk_asus_wapf4,
+	},
+	{
+		.callback = dmi_matched,
+		.ident = "ASUSTeK COMPUTER INC. X456UF",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"),
+		},
+		.driver_data = &quirk_asus_wapf4,
+	},
+	{
+		.callback = dmi_matched,
 		.ident = "ASUSTeK COMPUTER INC. X501U",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 0669731..fb4dd7b 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -54,8 +54,9 @@
 #define HPWMI_HARDWARE_QUERY 0x4
 #define HPWMI_WIRELESS_QUERY 0x5
 #define HPWMI_BIOS_QUERY 0x9
+#define HPWMI_FEATURE_QUERY 0xb
 #define HPWMI_HOTKEY_QUERY 0xc
-#define HPWMI_FEATURE_QUERY 0xd
+#define HPWMI_FEATURE2_QUERY 0xd
 #define HPWMI_WIRELESS2_QUERY 0x1b
 #define HPWMI_POSTCODEERROR_QUERY 0x2a
 
@@ -295,25 +296,33 @@
 	return (state & 0x4) ? 1 : 0;
 }
 
-static int __init hp_wmi_bios_2009_later(void)
+static int __init hp_wmi_bios_2008_later(void)
 {
 	int state = 0;
 	int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state,
 				       sizeof(state), sizeof(state));
-	if (ret)
-		return ret;
+	if (!ret)
+		return 1;
 
-	return (state & 0x10) ? 1 : 0;
+	return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
 }
 
-static int hp_wmi_enable_hotkeys(void)
+static int __init hp_wmi_bios_2009_later(void)
 {
-	int ret;
-	int query = 0x6e;
+	int state = 0;
+	int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, 0, &state,
+				       sizeof(state), sizeof(state));
+	if (!ret)
+		return 1;
 
-	ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
-				   0);
+	return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
+}
 
+static int __init hp_wmi_enable_hotkeys(void)
+{
+	int value = 0x6e;
+	int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value,
+				       sizeof(value), 0);
 	if (ret)
 		return -EINVAL;
 	return 0;
@@ -663,7 +672,7 @@
 			    hp_wmi_tablet_state());
 	input_sync(hp_wmi_input_dev);
 
-	if (hp_wmi_bios_2009_later() == 4)
+	if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
 		hp_wmi_enable_hotkeys();
 
 	status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
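
The reworked hp-wmi queries above split the old single FEATURE query into two probes, query 0xb for 2008-and-later BIOSes and query 0xd for 2009-and-later, where each probe either succeeds, fails with an "unknown command type" code (an older BIOS), or fails for real. As a rough illustration of that probe pattern only: perform_query(), the RET_UNKNOWN_CMDTYPE value and its stubbed behaviour below are assumptions of this sketch, not the driver's real helpers.

#include <stdio.h>

#define RET_UNKNOWN_CMDTYPE	2	/* assumed value, for this sketch only */

/* Stand-in for the driver's WMI query helper: pretend the 0xb query is
 * understood and the 0xd query is not.
 */
static int perform_query(int query)
{
	return (query == 0xb) ? 0 : RET_UNKNOWN_CMDTYPE;
}

/* 1: feature present, 0: older BIOS (query unknown), -1: real failure */
static int bios_supports(int query)
{
	int ret = perform_query(query);

	if (!ret)
		return 1;
	if (ret == RET_UNKNOWN_CMDTYPE)
		return 0;
	return -1;
}

int main(void)
{
	/* Mirrors the new check in the init path: enable hotkeys only on the
	 * 2008 BIOS generation that is not also 2009 or later.
	 */
	if (!bios_supports(0xd) && bios_supports(0xb) == 1)
		printf("enable hotkeys\n");
	return 0;
}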
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 0944e83..9f713b8 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -132,7 +132,7 @@
  * to achieve very close approximate temp value with less than
  * 0.5C error
  */
-static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
+static int adc_to_temp(int direct, uint16_t adc_val, int *tp)
 {
 	int temp;
 
@@ -174,14 +174,13 @@
  *
  * Can sleep
  */
-static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp)
+static int mid_read_temp(struct thermal_zone_device *tzd, int *temp)
 {
 	struct thermal_device_info *td_info = tzd->devdata;
 	uint16_t adc_val, addr;
 	uint8_t data = 0;
 	int ret;
-	unsigned long curr_temp;
-
+	int curr_temp;
 
 	addr = td_info->chnl_addr;
 
@@ -453,7 +452,7 @@
  *
  * Can sleep
  */
-static int read_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
+static int read_curr_temp(struct thermal_zone_device *tzd, int *temp)
 {
 	WARN_ON(tzd == NULL);
 	return mid_read_temp(tzd, temp);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 6740c51..f2372f4 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -938,7 +938,7 @@
 	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
 
-	return result = TOS_SUCCESS ? 0 : -EIO;
+	return result == TOS_SUCCESS ? 0 : -EIO;
 }
 
 static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state)
@@ -2398,11 +2398,9 @@
 	if (error)
 		return error;
 
-	error = toshiba_hotkey_event_type_get(dev, &events_type);
-	if (error) {
-		pr_err("Unable to query Hotkey Event Type\n");
-		return error;
-	}
+	if (toshiba_hotkey_event_type_get(dev, &events_type))
+		pr_notice("Unable to query Hotkey Event Type\n");
+
 	dev->hotkey_event_type = events_type;
 
 	dev->hotkey_dev = input_allocate_device();
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index aac4757..eb391a2 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -194,34 +194,6 @@
 	return true;
 }
 
-/*
- * Convert a raw GUID to the ACII string representation
- */
-static int wmi_gtoa(const char *in, char *out)
-{
-	int i;
-
-	for (i = 3; i >= 0; i--)
-		out += sprintf(out, "%02X", in[i] & 0xFF);
-
-	out += sprintf(out, "-");
-	out += sprintf(out, "%02X", in[5] & 0xFF);
-	out += sprintf(out, "%02X", in[4] & 0xFF);
-	out += sprintf(out, "-");
-	out += sprintf(out, "%02X", in[7] & 0xFF);
-	out += sprintf(out, "%02X", in[6] & 0xFF);
-	out += sprintf(out, "-");
-	out += sprintf(out, "%02X", in[8] & 0xFF);
-	out += sprintf(out, "%02X", in[9] & 0xFF);
-	out += sprintf(out, "-");
-
-	for (i = 10; i <= 15; i++)
-		out += sprintf(out, "%02X", in[i] & 0xFF);
-
-	*out = '\0';
-	return 0;
-}
-
 static bool find_guid(const char *guid_string, struct wmi_block **out)
 {
 	char tmp[16], guid_input[16];
@@ -457,11 +429,7 @@
 
 static void wmi_dump_wdg(const struct guid_block *g)
 {
-	char guid_string[37];
-
-	wmi_gtoa(g->guid, guid_string);
-
-	pr_info("%s:\n", guid_string);
+	pr_info("%pUL:\n", g->guid);
 	pr_info("\tobject_id: %c%c\n", g->object_id[0], g->object_id[1]);
 	pr_info("\tnotify_id: %02X\n", g->notify_id);
 	pr_info("\treserved: %02X\n", g->reserved);
@@ -661,7 +629,6 @@
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 			     char *buf)
 {
-	char guid_string[37];
 	struct wmi_block *wblock;
 
 	wblock = dev_get_drvdata(dev);
@@ -670,9 +637,7 @@
 		return strlen(buf);
 	}
 
-	wmi_gtoa(wblock->gblock.guid, guid_string);
-
-	return sprintf(buf, "wmi:%s\n", guid_string);
+	return sprintf(buf, "wmi:%pUL\n", wblock->gblock.guid);
 }
 static DEVICE_ATTR_RO(modalias);
 
@@ -695,7 +660,7 @@
 	if (!wblock)
 		return -ENOMEM;
 
-	wmi_gtoa(wblock->gblock.guid, guid_string);
+	sprintf(guid_string, "%pUL", wblock->gblock.guid);
 
 	strcpy(&env->buf[env->buflen - 1], "wmi:");
 	memcpy(&env->buf[env->buflen - 1 + 4], guid_string, 36);
@@ -721,12 +686,9 @@
 static int wmi_create_device(const struct guid_block *gblock,
 			     struct wmi_block *wblock, acpi_handle handle)
 {
-	char guid_string[37];
-
 	wblock->dev.class = &wmi_class;
 
-	wmi_gtoa(gblock->guid, guid_string);
-	dev_set_name(&wblock->dev, "%s", guid_string);
+	dev_set_name(&wblock->dev, "%pUL", gblock->guid);
 
 	dev_set_drvdata(&wblock->dev, wblock);
 
@@ -877,7 +839,6 @@
 	struct guid_block *block;
 	struct wmi_block *wblock;
 	struct list_head *p;
-	char guid_string[37];
 
 	list_for_each(p, &wmi_block_list) {
 		wblock = list_entry(p, struct wmi_block, list);
@@ -888,8 +849,8 @@
 			if (wblock->handler)
 				wblock->handler(event, wblock->handler_data);
 			if (debug_event) {
-				wmi_gtoa(wblock->gblock.guid, guid_string);
-				pr_info("DEBUG Event GUID: %s\n", guid_string);
+				pr_info("DEBUG Event GUID: %pUL\n",
+					wblock->gblock.guid);
 			}
 
 			acpi_bus_generate_netlink_event(
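
The wmi.c hunks above drop the open-coded wmi_gtoa() helper in favour of the kernel's %pUL printk specifier, which emits the same mixed-endian GUID layout in uppercase hex. For reference, a minimal user-space sketch of that byte ordering follows; guid_to_string() and the sample GUID are invented for the illustration and are not part of the patch.

#include <stdio.h>

/* Print a raw 16-byte GUID in the order the removed wmi_gtoa() used, which
 * matches what %pUL produces: the first three fields little-endian, the
 * remaining bytes in storage order.
 */
static void guid_to_string(const unsigned char in[16], char out[37])
{
	snprintf(out, 37,
		 "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-"
		 "%02X%02X%02X%02X%02X%02X",
		 in[3], in[2], in[1], in[0],	/* 32-bit field, byte-swapped */
		 in[5], in[4],			/* 16-bit field, byte-swapped */
		 in[7], in[6],			/* 16-bit field, byte-swapped */
		 in[8], in[9],			/* printed as stored */
		 in[10], in[11], in[12], in[13], in[14], in[15]);
}

int main(void)
{
	const unsigned char guid[16] = {
		0x34, 0xf0, 0xb7, 0x95, 0x1b, 0x5c, 0x4c, 0x45,
		0x97, 0x39, 0x51, 0x6a, 0x1a, 0xe3, 0x5e, 0x6b,
	};
	char buf[37];

	guid_to_string(guid, buf);
	printf("wmi:%s\n", buf);
	return 0;
}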
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 1c202cc..907293e 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -619,7 +619,7 @@
 
 #ifdef CONFIG_THERMAL
 	if (cm->tzd_batt) {
-		ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp);
+		ret = thermal_zone_get_temp(cm->tzd_batt, temp);
 		if (!ret)
 			/* Calibrate temperature unit */
 			*temp /= 100;
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 869284c..456987c 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -557,7 +557,7 @@
 
 #ifdef CONFIG_THERMAL
 static int power_supply_read_temp(struct thermal_zone_device *tzd,
-		unsigned long *temp)
+		int *temp)
 {
 	struct power_supply *psy;
 	union power_supply_propval val;
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index f4f2c1f..74f2d3f 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -91,7 +91,7 @@
 #define TWL4030_MSTATEC_COMPLETE1	0x0b
 #define TWL4030_MSTATEC_COMPLETE4	0x0e
 
-#if IS_ENABLED(CONFIG_TWL4030_MADC)
+#if IS_REACHABLE(CONFIG_TWL4030_MADC)
 /*
  * If AC (Accessory Charger) voltage exceeds 4.5V (MADC 11)
  * then AC is available.
@@ -1057,13 +1057,9 @@
 
 		phynode = of_find_compatible_node(bci->dev->of_node->parent,
 						  NULL, "ti,twl4030-usb");
-		if (phynode) {
+		if (phynode)
 			bci->transceiver = devm_usb_get_phy_by_node(
 				bci->dev, phynode, &bci->usb_nb);
-			if (IS_ERR(bci->transceiver) &&
-			    PTR_ERR(bci->transceiver) == -EPROBE_DEFER)
-				return -EPROBE_DEFER;
-		}
 	}
 
 	/* Enable interrupts now. */
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 738adfa..52ea605 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -318,6 +318,7 @@
 	{ .compatible = "fsl,anatop-regulator", },
 	{ /* end */ }
 };
+MODULE_DEVICE_TABLE(of, of_anatop_regulator_match_tbl);
 
 static struct platform_driver anatop_regulator_driver = {
 	.driver = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7a85ac9..7849187 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1394,15 +1394,15 @@
 		return 0;
 
 	r = regulator_dev_lookup(dev, rdev->supply_name, &ret);
-	if (ret == -ENODEV) {
-		/*
-		 * No supply was specified for this regulator and
-		 * there will never be one.
-		 */
-		return 0;
-	}
-
 	if (!r) {
+		if (ret == -ENODEV) {
+			/*
+			 * No supply was specified for this regulator and
+			 * there will never be one.
+			 */
+			return 0;
+		}
+
 		if (have_full_constraints()) {
 			r = dummy_regulator_rdev;
 		} else {
@@ -1422,11 +1422,10 @@
 		return ret;
 
 	/* Cascade always-on state to supply */
-	if (_regulator_is_enabled(rdev)) {
+	if (_regulator_is_enabled(rdev) && rdev->supply) {
 		ret = regulator_enable(rdev->supply);
 		if (ret < 0) {
-			if (rdev->supply)
-				_regulator_put(rdev->supply);
+			_regulator_put(rdev->supply);
 			return ret;
 		}
 	}
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 464018d..7bba8b7 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -394,6 +394,7 @@
 	{ .compatible = "regulator-gpio", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, regulator_gpio_of_match);
 #endif
 
 static struct platform_driver gpio_regulator_driver = {
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index 4fa7bca..f9d74d6 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -45,6 +45,10 @@
 	int voltage;
 };
 
+struct pbias_of_data {
+	unsigned int offset;
+};
+
 static const unsigned int pbias_volt_table[] = {
 	1800000,
 	3000000
@@ -102,8 +106,35 @@
 };
 #define PBIAS_NUM_REGS	ARRAY_SIZE(pbias_matches)
 
+/* Offset from SCM general area (and syscon) base */
+
+static const struct pbias_of_data pbias_of_data_omap2 = {
+	.offset = 0x230,
+};
+
+static const struct pbias_of_data pbias_of_data_omap3 = {
+	.offset = 0x2b0,
+};
+
+static const struct pbias_of_data pbias_of_data_omap4 = {
+	.offset = 0x60,
+};
+
+static const struct pbias_of_data pbias_of_data_omap5 = {
+	.offset = 0x60,
+};
+
+static const struct pbias_of_data pbias_of_data_dra7 = {
+	.offset = 0xe00,
+};
+
 static const struct of_device_id pbias_of_match[] = {
 	{ .compatible = "ti,pbias-omap", },
+	{ .compatible = "ti,pbias-omap2", .data = &pbias_of_data_omap2, },
+	{ .compatible = "ti,pbias-omap3", .data = &pbias_of_data_omap3, },
+	{ .compatible = "ti,pbias-omap4", .data = &pbias_of_data_omap4, },
+	{ .compatible = "ti,pbias-omap5", .data = &pbias_of_data_omap5, },
+	{ .compatible = "ti,pbias-dra7", .data = &pbias_of_data_dra7, },
 	{},
 };
 MODULE_DEVICE_TABLE(of, pbias_of_match);
@@ -118,6 +149,9 @@
 	const struct pbias_reg_info *info;
 	int ret = 0;
 	int count, idx, data_idx = 0;
+	const struct of_device_id *match;
+	const struct pbias_of_data *data;
+	unsigned int offset;
 
 	count = of_regulator_match(&pdev->dev, np, pbias_matches,
 						PBIAS_NUM_REGS);
@@ -133,6 +167,20 @@
 	if (IS_ERR(syscon))
 		return PTR_ERR(syscon);
 
+	match = of_match_device(of_match_ptr(pbias_of_match), &pdev->dev);
+	if (match && match->data) {
+		data = match->data;
+		offset = data->offset;
+	} else {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res)
+			return -EINVAL;
+
+		offset = res->start;
+		dev_WARN(&pdev->dev,
+			 "using legacy dt data for pbias offset\n");
+	}
+
 	cfg.regmap = syscon;
 	cfg.dev = &pdev->dev;
 
@@ -145,10 +193,6 @@
 		if (!info)
 			return -ENODEV;
 
-		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-		if (!res)
-			return -EINVAL;
-
 		drvdata[data_idx].syscon = syscon;
 		drvdata[data_idx].info = info;
 		drvdata[data_idx].desc.name = info->name;
@@ -158,9 +202,9 @@
 		drvdata[data_idx].desc.volt_table = pbias_volt_table;
 		drvdata[data_idx].desc.n_voltages = 2;
 		drvdata[data_idx].desc.enable_time = info->enable_time;
-		drvdata[data_idx].desc.vsel_reg = res->start;
+		drvdata[data_idx].desc.vsel_reg = offset;
 		drvdata[data_idx].desc.vsel_mask = info->vmode;
-		drvdata[data_idx].desc.enable_reg = res->start;
+		drvdata[data_idx].desc.enable_reg = offset;
 		drvdata[data_idx].desc.enable_mask = info->enable_mask;
 		drvdata[data_idx].desc.enable_val = info->enable;
 		drvdata[data_idx].desc.disable_val = info->disable_val;
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 7f97223..a02c1b9 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -73,7 +73,7 @@
 };
 
 static struct tps_info tps65218_pmic_regs[] = {
-	TPS65218_INFO(DCDC1, "DCDC1", 850000, 167500),
+	TPS65218_INFO(DCDC1, "DCDC1", 850000, 1675000),
 	TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000),
 	TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000),
 	TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000),
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c
index bed9d3e..c810cbb 100644
--- a/drivers/regulator/vexpress.c
+++ b/drivers/regulator/vexpress.c
@@ -103,6 +103,7 @@
 	{ .compatible = "arm,vexpress-volt", },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, vexpress_regulator_of_match);
 
 static struct platform_driver vexpress_regulator_driver = {
 	.probe = vexpress_regulator_probe,
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index b7d6030..fc94bfd 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -229,7 +229,7 @@
 /******************************************************************************
  *                               IUCV handler                                 *
  *****************************************************************************/
-static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
+static void mon_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
 {
 	struct mon_private *monpriv = path->private;
 
@@ -237,7 +237,7 @@
 	wake_up(&mon_conn_wait_queue);
 }
 
-static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
+static void mon_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
 {
 	struct mon_private *monpriv = path->private;
 
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 9bb48d7..799c152 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -99,8 +99,8 @@
 };
 
 
-static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
-static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
+static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 *ipuser);
+static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 *ipuser);
 static void vmlogrdr_iucv_message_pending(struct iucv_path *,
 					  struct iucv_message *);
 
@@ -160,7 +160,7 @@
 static int recording_class_AB;
 
 
-static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
+static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
 {
 	struct vmlogrdr_priv_t * logptr = path->private;
 
@@ -171,7 +171,7 @@
 }
 
 
-static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
+static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
 {
 	struct vmlogrdr_priv_t * logptr = path->private;
 	u8 reason = (u8) ipuser[8];
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 848e3b6..4bb5262f 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -319,6 +319,8 @@
 	int retries = 0, cc;
 	unsigned long laob = 0;
 
+	WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) ||
+			     !q->u.out.use_cq));
 	if (q->u.out.use_cq && aob != 0) {
 		fc = QDIO_SIGA_WRITEQ;
 		laob = aob;
@@ -329,8 +331,6 @@
 		fc |= QDIO_SIGA_QEBSM_FLAG;
 	}
 again:
-	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
-		(aob && fc != QDIO_SIGA_WRITEQ));
 	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
 
 	/* hipersocket busy condition */
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 33f7040d..0ba3a2f 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -149,12 +149,11 @@
 	.pm = &netiucv_pm_ops,
 };
 
-static int netiucv_callback_connreq(struct iucv_path *,
-				    u8 ipvmid[8], u8 ipuser[16]);
-static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
-static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
-static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
-static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
+static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
+static void netiucv_callback_connack(struct iucv_path *, u8 *);
+static void netiucv_callback_connrej(struct iucv_path *, u8 *);
+static void netiucv_callback_connsusp(struct iucv_path *, u8 *);
+static void netiucv_callback_connres(struct iucv_path *, u8 *);
 static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
 static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
 
@@ -556,8 +555,8 @@
 	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
 }
 
-static int netiucv_callback_connreq(struct iucv_path *path,
-				    u8 ipvmid[8], u8 ipuser[16])
+static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid,
+				    u8 *ipuser)
 {
 	struct iucv_connection *conn = path->private;
 	struct iucv_event ev;
@@ -587,21 +586,21 @@
 	return rc;
 }
 
-static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
+static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
 {
 	struct iucv_connection *conn = path->private;
 
 	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
 }
 
-static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
+static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser)
 {
 	struct iucv_connection *conn = path->private;
 
 	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
 }
 
-static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
+static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
 {
 	struct iucv_connection *conn = path->private;
 
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index ba974a2..1766a20 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -18,6 +18,7 @@
 #include <linux/bitops.h>
 #include <linux/seq_file.h>
 #include <linux/ethtool.h>
+#include <linux/hashtable.h>
 
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
@@ -663,9 +664,7 @@
 	char mcl_level[QETH_MCL_LENGTH + 1];
 	int guestlan;
 	int mac_bits;
-	int portname_required;
 	int portno;
-	char portname[9];
 	enum qeth_card_types type;
 	enum qeth_link_types link_type;
 	int is_multicast_different;
@@ -741,11 +740,17 @@
 	unsigned short vid;
 };
 
-struct qeth_mc_mac {
-	struct list_head list;
-	__u8 mc_addr[MAX_ADDR_LEN];
-	unsigned char mc_addrlen;
-	int is_vmac;
+enum qeth_mac_disposition {
+	QETH_DISP_MAC_DELETE = 0,
+	QETH_DISP_MAC_DO_NOTHING = 1,
+	QETH_DISP_MAC_ADD = 2,
+};
+
+struct qeth_mac {
+	u8 mac_addr[OSA_ADDR_LEN];
+	u8 is_uc:1;
+	u8 disp_flag:2;
+	struct hlist_node hnode;
 };
 
 struct qeth_rx {
@@ -792,7 +797,7 @@
 	spinlock_t mclock;
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	struct list_head vid_list;
-	struct list_head mc_list;
+	DECLARE_HASHTABLE(mac_htable, 4);
 	struct work_struct kernel_thread_starter;
 	spinlock_t thread_mask_lock;
 	unsigned long thread_start_mask;
@@ -969,6 +974,15 @@
 int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
 void qeth_trace_features(struct qeth_card *);
 void qeth_close_dev(struct qeth_card *);
+int qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
+				 __u16, long);
+int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
+			  long,
+			  int (*reply_cb)(struct qeth_card *,
+					  struct qeth_reply *, unsigned long),
+			  void *);
+int qeth_start_ipa_tx_checksum(struct qeth_card *);
+int qeth_set_rx_csum(struct qeth_card *, int);
 
 /* exports for OSN */
 int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 5e20fba..31ac53f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1982,14 +1982,6 @@
 		goto out;
 	}
 
-/**
- *  * temporary fix for microcode bug
- *   * to revert it,replace OR by AND
- *    */
-	if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
-	     (card->info.type == QETH_CARD_TYPE_OSD))
-		card->info.portname_required = 1;
-
 	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
 	if (temp != qeth_peer_func_level(card->info.func_level)) {
 		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
@@ -2360,8 +2352,6 @@
 	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
 	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
 	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
-	memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
-	       card->info.portname, 9);
 	rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
 				    qeth_ulp_enable_cb, NULL);
 	return rc;
@@ -2680,48 +2670,6 @@
 	return rc;
 }
 
-static void qeth_print_status_with_portname(struct qeth_card *card)
-{
-	char dbf_text[15];
-	int i;
-
-	sprintf(dbf_text, "%s", card->info.portname + 1);
-	for (i = 0; i < 8; i++)
-		dbf_text[i] =
-			(char) _ebcasc[(__u8) dbf_text[i]];
-	dbf_text[8] = 0;
-	dev_info(&card->gdev->dev, "Device is a%s card%s%s%s\n"
-	       "with link type %s (portname: %s)\n",
-	       qeth_get_cardname(card),
-	       (card->info.mcl_level[0]) ? " (level: " : "",
-	       (card->info.mcl_level[0]) ? card->info.mcl_level : "",
-	       (card->info.mcl_level[0]) ? ")" : "",
-	       qeth_get_cardname_short(card),
-	       dbf_text);
-
-}
-
-static void qeth_print_status_no_portname(struct qeth_card *card)
-{
-	if (card->info.portname[0])
-		dev_info(&card->gdev->dev, "Device is a%s "
-		       "card%s%s%s\nwith link type %s "
-		       "(no portname needed by interface).\n",
-		       qeth_get_cardname(card),
-		       (card->info.mcl_level[0]) ? " (level: " : "",
-		       (card->info.mcl_level[0]) ? card->info.mcl_level : "",
-		       (card->info.mcl_level[0]) ? ")" : "",
-		       qeth_get_cardname_short(card));
-	else
-		dev_info(&card->gdev->dev, "Device is a%s "
-		       "card%s%s%s\nwith link type %s.\n",
-		       qeth_get_cardname(card),
-		       (card->info.mcl_level[0]) ? " (level: " : "",
-		       (card->info.mcl_level[0]) ? card->info.mcl_level : "",
-		       (card->info.mcl_level[0]) ? ")" : "",
-		       qeth_get_cardname_short(card));
-}
-
 void qeth_print_status_message(struct qeth_card *card)
 {
 	switch (card->info.type) {
@@ -2758,10 +2706,13 @@
 	default:
 		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
 	}
-	if (card->info.portname_required)
-		qeth_print_status_with_portname(card);
-	else
-		qeth_print_status_no_portname(card);
+	dev_info(&card->gdev->dev,
+		 "Device is a%s card%s%s%s\nwith link type %s.\n",
+		 qeth_get_cardname(card),
+		 (card->info.mcl_level[0]) ? " (level: " : "",
+		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
+		 (card->info.mcl_level[0]) ? ")" : "",
+		 qeth_get_cardname_short(card));
 }
 EXPORT_SYMBOL_GPL(qeth_print_status_message);
 
@@ -5027,13 +4978,11 @@
 void qeth_trace_features(struct qeth_card *card)
 {
 	QETH_CARD_TEXT(card, 2, "features");
-	QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.supported_funcs);
-	QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.enabled_funcs);
-	QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa6.supported_funcs);
-	QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa6.enabled_funcs);
-	QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.supported_funcs);
-	QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.enabled_funcs);
-	QETH_CARD_TEXT_(card, 2, "%x", card->info.diagass_support);
+	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
+	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
+	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
+	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
+		      sizeof(card->info.diagass_support));
 }
 EXPORT_SYMBOL_GPL(qeth_trace_features);
 
@@ -5132,6 +5081,7 @@
 	}
 
 	card->options.ipa4.supported_funcs = 0;
+	card->options.ipa6.supported_funcs = 0;
 	card->options.adp.supported_funcs = 0;
 	card->options.sbp.supported_funcs = 0;
 	card->info.diagass_support = 0;
@@ -5317,6 +5267,102 @@
 }
 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
 
+static int qeth_setassparms_cb(struct qeth_card *card,
+			       struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "defadpcb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.return_code == 0) {
+		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+			card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+		if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+			card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
+	}
+	if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
+	    cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
+		card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
+		QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask);
+	}
+	if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
+	    cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
+		card->info.tx_csum_mask =
+			cmd->data.setassparms.data.flags_32bit;
+		QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask);
+	}
+
+	return 0;
+}
+
+static struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
+						  enum qeth_ipa_funcs ipa_func,
+						  __u16 cmd_code, __u16 len,
+						  enum qeth_prot_versions prot)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "getasscm");
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
+
+	if (iob) {
+		cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+		cmd->data.setassparms.hdr.assist_no = ipa_func;
+		cmd->data.setassparms.hdr.length = 8 + len;
+		cmd->data.setassparms.hdr.command_code = cmd_code;
+		cmd->data.setassparms.hdr.return_code = 0;
+		cmd->data.setassparms.hdr.seq_no = 0;
+	}
+
+	return iob;
+}
+
+int qeth_send_setassparms(struct qeth_card *card,
+			  struct qeth_cmd_buffer *iob, __u16 len, long data,
+			  int (*reply_cb)(struct qeth_card *,
+					  struct qeth_reply *, unsigned long),
+			  void *reply_param)
+{
+	int rc;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "sendassp");
+
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	if (len <= sizeof(__u32))
+		cmd->data.setassparms.data.flags_32bit = (__u32) data;
+	else   /* (len > sizeof(__u32)) */
+		memcpy(&cmd->data.setassparms.data, (void *) data, len);
+
+	rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_send_setassparms);
+
+int qeth_send_simple_setassparms(struct qeth_card *card,
+				 enum qeth_ipa_funcs ipa_func,
+				 __u16 cmd_code, long data)
+{
+	int rc;
+	int length = 0;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT(card, 4, "simassp4");
+	if (data)
+		length = sizeof(__u32);
+	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
+				       length, QETH_PROT_IPV4);
+	if (!iob)
+		return -ENOMEM;
+	rc = qeth_send_setassparms(card, iob, length, data,
+				   qeth_setassparms_cb, NULL);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms);
+
 static void qeth_unregister_dbf_views(void)
 {
 	int x;
@@ -6003,6 +6049,75 @@
 }
 EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
 
+static int qeth_send_checksum_command(struct qeth_card *card)
+{
+	int rc;
+
+	rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
+					  IPA_CMD_ASS_START, 0);
+	if (rc) {
+		dev_warn(&card->gdev->dev, "Starting HW checksumming for %s "
+			"failed, using SW checksumming\n",
+			QETH_CARD_IFNAME(card));
+		return rc;
+	}
+	rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
+					  IPA_CMD_ASS_ENABLE,
+					  card->info.csum_mask);
+	if (rc) {
+		dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s "
+			"failed, using SW checksumming\n",
+			QETH_CARD_IFNAME(card));
+		return rc;
+	}
+	return 0;
+}
+
+int qeth_set_rx_csum(struct qeth_card *card, int on)
+{
+	int rc;
+
+	if (on) {
+		rc = qeth_send_checksum_command(card);
+		if (rc)
+			return -EIO;
+		dev_info(&card->gdev->dev,
+			"HW Checksumming (inbound) enabled\n");
+	} else {
+		rc = qeth_send_simple_setassparms(card,
+			IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
+		if (rc)
+			return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_set_rx_csum);
+
+int qeth_start_ipa_tx_checksum(struct qeth_card *card)
+{
+	int rc = 0;
+
+	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+		return rc;
+	rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
+					  IPA_CMD_ASS_START, 0);
+	if (rc)
+		goto err_out;
+	rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
+					  IPA_CMD_ASS_ENABLE,
+					  card->info.tx_csum_mask);
+	if (rc)
+		goto err_out;
+
+	dev_info(&card->gdev->dev, "HW TX Checksumming enabled\n");
+	return rc;
+err_out:
+	dev_warn(&card->gdev->dev, "Enabling HW TX checksumming for %s "
+		"failed, using SW TX checksumming\n", QETH_CARD_IFNAME(card));
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_start_ipa_tx_checksum);
+
 static int __init qeth_core_init(void)
 {
 	int rc;
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 423bec5..e6e5b96 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -153,52 +153,17 @@
 static ssize_t qeth_dev_portname_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-	char portname[9] = {0, };
-
-	if (!card)
-		return -EINVAL;
-
-	if (card->info.portname_required) {
-		memcpy(portname, card->info.portname + 1, 8);
-		EBCASC(portname, 8);
-		return sprintf(buf, "%s\n", portname);
-	} else
-		return sprintf(buf, "no portname required\n");
+	return sprintf(buf, "no portname required\n");
 }
 
 static ssize_t qeth_dev_portname_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	char *tmp;
-	int i, rc = 0;
 
-	if (!card)
-		return -EINVAL;
-
-	mutex_lock(&card->conf_mutex);
-	if ((card->state != CARD_STATE_DOWN) &&
-	    (card->state != CARD_STATE_RECOVER)) {
-		rc = -EPERM;
-		goto out;
-	}
-
-	tmp = strsep((char **) &buf, "\n");
-	if ((strlen(tmp) > 8) || (strlen(tmp) == 0)) {
-		rc = -EINVAL;
-		goto out;
-	}
-
-	card->info.portname[0] = strlen(tmp);
-	/* for beauty reasons */
-	for (i = 1; i < 9; i++)
-		card->info.portname[i] = ' ';
-	strcpy(card->info.portname + 1, tmp);
-	ASCEBC(card->info.portname + 1, 8);
-out:
-	mutex_unlock(&card->conf_mutex);
-	return rc ? rc : count;
+	dev_warn_once(&card->gdev->dev,
+		      "portname is deprecated and is ignored\n");
+	return count;
 }
 
 static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index a855669..8f1b091 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -19,7 +19,9 @@
 #include <linux/mii.h>
 #include <linux/ip.h>
 #include <linux/list.h>
-
+#include <linux/hash.h>
+#include <linux/hashtable.h>
+#include <linux/string.h>
 #include "qeth_core.h"
 #include "qeth_l2.h"
 
@@ -28,7 +30,7 @@
 static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
 static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
 			   enum qeth_ipa_cmds);
-static void qeth_l2_set_multicast_list(struct net_device *);
+static void qeth_l2_set_rx_mode(struct net_device *);
 static int qeth_l2_recover(void *);
 static void qeth_bridgeport_query_support(struct qeth_card *card);
 static void qeth_bridge_state_change(struct qeth_card *card,
@@ -193,49 +195,44 @@
 	return rc;
 }
 
-static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
+static inline u32 qeth_l2_mac_hash(const u8 *addr)
 {
-	struct qeth_mc_mac *mc;
-	int rc;
-
-	mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC);
-
-	if (!mc)
-		return;
-
-	memcpy(mc->mc_addr, mac, OSA_ADDR_LEN);
-	mc->mc_addrlen = OSA_ADDR_LEN;
-	mc->is_vmac = vmac;
-
-	if (vmac) {
-		rc = qeth_setdel_makerc(card,
-			qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC));
-	} else {
-		rc = qeth_setdel_makerc(card,
-			qeth_l2_send_setgroupmac(card, mac));
-	}
-
-	if (!rc)
-		list_add_tail(&mc->list, &card->mc_list);
-	else
-		kfree(mc);
+	return get_unaligned((u32 *)(&addr[2]));
 }
 
-static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
+static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
 {
-	struct qeth_mc_mac *mc, *tmp;
+
+	int rc;
+
+	if (mac->is_uc) {
+		rc = qeth_setdel_makerc(card,
+				qeth_l2_send_setdelmac(card, mac->mac_addr,
+						IPA_CMD_SETVMAC));
+	} else {
+		rc = qeth_setdel_makerc(card,
+				qeth_l2_send_setgroupmac(card, mac->mac_addr));
+	}
+	return rc;
+}
+
+static void qeth_l2_del_all_macs(struct qeth_card *card, int del)
+{
+	struct qeth_mac *mac;
+	struct hlist_node *tmp;
+	int i;
 
 	spin_lock_bh(&card->mclock);
-	list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
+	hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
 		if (del) {
-			if (mc->is_vmac)
-				qeth_l2_send_setdelmac(card, mc->mc_addr,
-					IPA_CMD_DELVMAC);
+			if (mac->is_uc)
+				qeth_l2_send_setdelmac(card, mac->mac_addr,
+						IPA_CMD_DELVMAC);
 			else
-				qeth_l2_send_delgroupmac(card, mc->mc_addr);
+				qeth_l2_send_delgroupmac(card, mac->mac_addr);
 		}
-		list_del(&mc->list);
-		kfree(mc);
+		hash_del(&mac->hnode);
+		kfree(mac);
 	}
 	spin_unlock_bh(&card->mclock);
 }
@@ -252,6 +249,23 @@
 	return RTN_UNSPEC;
 }
 
+static inline void qeth_l2_hdr_csum(struct qeth_card *card,
+				    struct qeth_hdr *hdr, struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+
+	/* tcph->check already contains the pseudo-header checksum,
+	 * so just set the header flags
+	 */
+	if (iph->protocol == IPPROTO_UDP)
+		hdr->hdr.l2.flags[1] |= QETH_HDR_EXT_UDP;
+	hdr->hdr.l2.flags[1] |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
+		QETH_HDR_EXT_CSUM_HDR_REQ;
+	iph->check = 0;
+	if (card->options.performance_stats)
+		card->perf_stats.tx_csum++;
+}
+
 static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 			struct sk_buff *skb, int cast_type)
 {
@@ -386,10 +400,42 @@
 		rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
 		kfree(tmpid);
 	}
-	qeth_l2_set_multicast_list(card->dev);
+	qeth_l2_set_rx_mode(card->dev);
 	return rc;
 }
 
+static netdev_features_t qeth_l2_fix_features(struct net_device *dev,
+					      netdev_features_t features)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_DBF_TEXT(SETUP, 2, "fixfeat");
+	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+		features &= ~NETIF_F_IP_CSUM;
+	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
+		features &= ~NETIF_F_RXCSUM;
+	QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
+	return features;
+}
+
+static int qeth_l2_set_features(struct net_device *dev,
+				netdev_features_t features)
+{
+	struct qeth_card *card = dev->ml_priv;
+	netdev_features_t changed = dev->features ^ features;
+
+	QETH_DBF_TEXT(SETUP, 2, "setfeat");
+	QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
+
+	if (card->state == CARD_STATE_DOWN ||
+	    card->state == CARD_STATE_RECOVER)
+		return 0;
+
+	if (!(changed & NETIF_F_RXCSUM))
+		return 0;
+	return qeth_set_rx_csum(card, features & NETIF_F_RXCSUM ? 1 : 0);
+}
+
 static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
 {
 	QETH_DBF_TEXT(SETUP , 2, "stopcard");
@@ -411,7 +457,7 @@
 		card->state = CARD_STATE_SOFTSETUP;
 	}
 	if (card->state == CARD_STATE_SOFTSETUP) {
-		qeth_l2_del_all_mc(card, 0);
+		qeth_l2_del_all_macs(card, 0);
 		qeth_clear_ipacmd_list(card);
 		card->state = CARD_STATE_HARDSETUP;
 	}
@@ -450,11 +496,19 @@
 		case QETH_HEADER_TYPE_LAYER2:
 			skb->pkt_type = PACKET_HOST;
 			skb->protocol = eth_type_trans(skb, skb->dev);
-			skb->ip_summed = CHECKSUM_NONE;
+			if ((card->dev->features & NETIF_F_RXCSUM)
+			   && ((hdr->hdr.l2.flags[1] &
+				(QETH_HDR_EXT_CSUM_HDR_REQ |
+				   QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
+				(QETH_HDR_EXT_CSUM_HDR_REQ |
+				   QETH_HDR_EXT_CSUM_TRANSP_REQ)))
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+			else
+				skb->ip_summed = CHECKSUM_NONE;
 			if (skb->protocol == htons(ETH_P_802_2))
 				*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
 			len = skb->len;
-			netif_receive_skb(skb);
+			napi_gro_receive(&card->napi, skb);
 			break;
 		case QETH_HEADER_TYPE_OSN:
 			if (card->info.type == QETH_CARD_TYPE_OSN) {
@@ -711,29 +765,91 @@
 		card->options.sbp.role = role;
 		card->info.promisc_mode = promisc_mode;
 	}
+
+}
+/* A new MAC address is added to the hash table and marked to be written to
+ * the card only if it is not already present in the hash table.
+ *
+ */
+static void
+qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc)
+{
+	struct qeth_mac *mac;
+
+	hash_for_each_possible(card->mac_htable, mac, hnode,
+			qeth_l2_mac_hash(ha->addr)) {
+		if (is_uc == mac->is_uc &&
+		    !memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) {
+			mac->disp_flag = QETH_DISP_MAC_DO_NOTHING;
+			return;
+		}
+	}
+
+	mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
+
+	if (!mac)
+		return;
+
+	memcpy(mac->mac_addr, ha->addr, OSA_ADDR_LEN);
+	mac->is_uc = is_uc;
+	mac->disp_flag = QETH_DISP_MAC_ADD;
+
+	hash_add(card->mac_htable, &mac->hnode,
+			qeth_l2_mac_hash(mac->mac_addr));
+
 }
 
-static void qeth_l2_set_multicast_list(struct net_device *dev)
+static void qeth_l2_set_rx_mode(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
 	struct netdev_hw_addr *ha;
+	struct qeth_mac *mac;
+	struct hlist_node *tmp;
+	int i;
+	int rc;
 
 	if (card->info.type == QETH_CARD_TYPE_OSN)
-		return ;
+		return;
 
 	QETH_CARD_TEXT(card, 3, "setmulti");
 	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
 	    (card->state != CARD_STATE_UP))
 		return;
-	qeth_l2_del_all_mc(card, 1);
+
 	spin_lock_bh(&card->mclock);
+
 	netdev_for_each_mc_addr(ha, dev)
-		qeth_l2_add_mc(card, ha->addr, 0);
+		qeth_l2_add_mac(card, ha, 0);
 
 	netdev_for_each_uc_addr(ha, dev)
-		qeth_l2_add_mc(card, ha->addr, 1);
+		qeth_l2_add_mac(card, ha, 1);
+
+	hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
+		if (mac->disp_flag == QETH_DISP_MAC_DELETE) {
+			if (!mac->is_uc)
+				rc = qeth_l2_send_delgroupmac(card,
+						mac->mac_addr);
+			else {
+				rc = qeth_l2_send_setdelmac(card, mac->mac_addr,
+						IPA_CMD_DELVMAC);
+			}
+
+			hash_del(&mac->hnode);
+			kfree(mac);
+
+		} else if (mac->disp_flag == QETH_DISP_MAC_ADD) {
+			rc = qeth_l2_write_mac(card, mac);
+			if (rc) {
+				hash_del(&mac->hnode);
+				kfree(mac);
+			} else
+				mac->disp_flag = QETH_DISP_MAC_DELETE;
+		} else
+			mac->disp_flag = QETH_DISP_MAC_DELETE;
+	}
 
 	spin_unlock_bh(&card->mclock);
+
 	if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
 		qeth_setadp_promisc_mode(card);
 	else
@@ -803,6 +919,8 @@
 						sizeof(struct qeth_hdr));
 			skb_set_mac_header(new_skb, sizeof(struct qeth_hdr));
 			qeth_l2_fill_header(card, hdr, new_skb, cast_type);
+			if (new_skb->ip_summed == CHECKSUM_PARTIAL)
+				qeth_l2_hdr_csum(card, hdr, new_skb);
 		}
 	}
 
@@ -915,7 +1033,7 @@
 
 	qeth_l2_create_device_attributes(&gdev->dev);
 	INIT_LIST_HEAD(&card->vid_list);
-	INIT_LIST_HEAD(&card->mc_list);
+	hash_init(card->mac_htable);
 	card->options.layer2 = 1;
 	card->info.hwtrap = 0;
 	return 0;
@@ -961,13 +1079,15 @@
 	.ndo_get_stats		= qeth_get_stats,
 	.ndo_start_xmit		= qeth_l2_hard_start_xmit,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_rx_mode	= qeth_l2_set_multicast_list,
+	.ndo_set_rx_mode	= qeth_l2_set_rx_mode,
 	.ndo_do_ioctl	   	= qeth_l2_do_ioctl,
 	.ndo_set_mac_address    = qeth_l2_set_mac_address,
 	.ndo_change_mtu	   	= qeth_change_mtu,
 	.ndo_vlan_rx_add_vid	= qeth_l2_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid   = qeth_l2_vlan_rx_kill_vid,
 	.ndo_tx_timeout	   	= qeth_tx_timeout,
+	.ndo_fix_features	= qeth_l2_fix_features,
+	.ndo_set_features	= qeth_l2_set_features
 };
 
 static int qeth_l2_setup_netdev(struct qeth_card *card)
@@ -997,6 +1117,11 @@
 		(card->info.type != QETH_CARD_TYPE_OSN) ?
 		&qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
 	card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+	if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
+		card->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+		/* Turn on RX offloading per default */
+		card->dev->features |= NETIF_F_RXCSUM;
+	}
 	card->info.broadcast_capable = 1;
 	qeth_l2_request_initial_mac(card);
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
@@ -1004,6 +1129,17 @@
 	return register_netdev(card->dev);
 }
 
+static int qeth_l2_start_ipassists(struct qeth_card *card)
+{
+	/* configure isolation level */
+	if (qeth_set_access_ctrl_online(card, 0))
+		return -ENODEV;
+	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
+		qeth_set_rx_csum(card, 1);
+	qeth_start_ipa_tx_checksum(card);
+	return 0;
+}
+
 static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
@@ -1069,12 +1205,8 @@
 contin:
 	if ((card->info.type == QETH_CARD_TYPE_OSD) ||
 	    (card->info.type == QETH_CARD_TYPE_OSX)) {
-		/* configure isolation level */
-		rc = qeth_set_access_ctrl_online(card, 0);
-		if (rc) {
-			rc = -ENODEV;
+		if (qeth_l2_start_ipassists(card))
 			goto out_remove;
-		}
 	}
 
 	if (card->info.type != QETH_CARD_TYPE_OSN &&
@@ -1106,7 +1238,7 @@
 			rtnl_unlock();
 		}
 		/* this also sets saved unicast addresses */
-		qeth_l2_set_multicast_list(card->dev);
+		qeth_l2_set_rx_mode(card->dev);
 	}
 	/* let user_space know that device is online */
 	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -1452,8 +1584,8 @@
 			env[i] = str[i]; i++;
 		}
 		if (code & IPA_ADDR_CHANGE_CODE_MACADDR) {
-			snprintf(str[i], sizeof(str[i]), "MAC=%pM6",
-				&addr_lnid->mac);
+			snprintf(str[i], sizeof(str[i]), "MAC=%pM",
+				addr_lnid->mac);
 			env[i] = str[i]; i++;
 		}
 		snprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x",
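
The qeth_l2 rework above replaces the "delete everything, re-add everything" multicast handling with a hash table whose entries carry a disposition flag (DELETE, DO_NOTHING, ADD), so only addresses that actually changed are reprogrammed on the card. Below is a simplified, self-contained sketch of that mark-and-sweep idea; the fixed-size table and every name in it are invented for the illustration and are not the driver's API.

#include <stdbool.h>
#include <string.h>

/* Dispositions, mirroring QETH_DISP_MAC_{DELETE,DO_NOTHING,ADD} in spirit. */
enum disp { DISP_DELETE, DISP_DO_NOTHING, DISP_ADD };

struct entry {
	unsigned char	addr[6];
	bool		used;
	enum disp	disp;
};

static struct entry table[32];	/* stands in for the driver's hash table */

/* Pass 1: mark every address the stack still wants. */
static void mark_wanted(const unsigned char *addr)
{
	int i, free_slot = -1;

	for (i = 0; i < 32; i++) {
		if (table[i].used && !memcmp(table[i].addr, addr, 6)) {
			table[i].disp = DISP_DO_NOTHING;	/* already programmed */
			return;
		}
		if (!table[i].used && free_slot < 0)
			free_slot = i;
	}
	if (free_slot >= 0) {
		memcpy(table[free_slot].addr, addr, 6);
		table[free_slot].used = true;
		table[free_slot].disp = DISP_ADD;		/* needs programming */
	}
}

/* Pass 2: program new addresses, drop stale ones, reset the rest. */
static void sweep(void)
{
	int i;

	for (i = 0; i < 32; i++) {
		if (!table[i].used)
			continue;
		switch (table[i].disp) {
		case DISP_DELETE:	/* not re-marked: remove from hardware */
			table[i].used = false;
			break;
		case DISP_ADD:		/* new: write to hardware */
		case DISP_DO_NOTHING:	/* unchanged: nothing to program */
			table[i].disp = DISP_DELETE;	/* default for next pass */
			break;
		}
	}
}

int main(void)
{
	const unsigned char a[6] = { 0x02, 0, 0, 0, 0, 0x01 };
	const unsigned char b[6] = { 0x02, 0, 0, 0, 0, 0x02 };

	mark_wanted(a);		/* first pass: a and b get programmed */
	mark_wanted(b);
	sweep();

	mark_wanted(a);		/* second pass: b was dropped, so it is removed */
	sweep();
	return 0;
}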
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 52673cd..692db49 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -109,7 +109,7 @@
 	return qeth_bridge_port_role_state_show(dev, attr, buf, 1);
 }
 
-static DEVICE_ATTR(bridge_state, 0644, qeth_bridge_port_state_show,
+static DEVICE_ATTR(bridge_state, 0444, qeth_bridge_port_state_show,
 		   NULL);
 
 static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index a1aaa36..543960e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1065,27 +1065,6 @@
 	return iob;
 }
 
-static int qeth_l3_send_setassparms(struct qeth_card *card,
-	struct qeth_cmd_buffer *iob, __u16 len, long data,
-	int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
-		unsigned long),
-	void *reply_param)
-{
-	int rc;
-	struct qeth_ipa_cmd *cmd;
-
-	QETH_CARD_TEXT(card, 4, "sendassp");
-
-	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-	if (len <= sizeof(__u32))
-		cmd->data.setassparms.data.flags_32bit = (__u32) data;
-	else   /* (len > sizeof(__u32)) */
-		memcpy(&cmd->data.setassparms.data, (void *) data, len);
-
-	rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
-	return rc;
-}
-
 #ifdef CONFIG_QETH_IPV6
 static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
 		enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
@@ -1098,31 +1077,12 @@
 				       0, QETH_PROT_IPV6);
 	if (!iob)
 		return -ENOMEM;
-	rc = qeth_l3_send_setassparms(card, iob, 0, 0,
+	rc = qeth_send_setassparms(card, iob, 0, 0,
 				   qeth_l3_default_setassparms_cb, NULL);
 	return rc;
 }
 #endif
 
-static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
-		enum qeth_ipa_funcs ipa_func, __u16 cmd_code, long data)
-{
-	int rc;
-	int length = 0;
-	struct qeth_cmd_buffer *iob;
-
-	QETH_CARD_TEXT(card, 4, "simassp4");
-	if (data)
-		length = sizeof(__u32);
-	iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
-				       length, QETH_PROT_IPV4);
-	if (!iob)
-		return -ENOMEM;
-	rc = qeth_l3_send_setassparms(card, iob, length, data,
-				   qeth_l3_default_setassparms_cb, NULL);
-	return rc;
-}
-
 static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
 {
 	int rc;
@@ -1135,8 +1095,8 @@
 			QETH_CARD_IFNAME(card));
 		return 0;
 	}
-	rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
-					IPA_CMD_ASS_START, 0);
+	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
+					  IPA_CMD_ASS_START, 0);
 	if (rc) {
 		dev_warn(&card->gdev->dev,
 			"Starting ARP processing support for %s failed\n",
@@ -1158,7 +1118,7 @@
 		return  -EOPNOTSUPP;
 	}
 
-	rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
+	rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
 					  IPA_CMD_ASS_START, 0);
 	if (rc) {
 		dev_warn(&card->gdev->dev,
@@ -1183,7 +1143,7 @@
 		return -EOPNOTSUPP;
 	}
 
-	rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC,
+	rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
 					  IPA_CMD_ASS_START, 0);
 	if (rc)
 		dev_warn(&card->gdev->dev,
@@ -1204,7 +1164,7 @@
 		return -EOPNOTSUPP;
 	}
 
-	rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO,
+	rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
 					  IPA_CMD_ASS_START, 0);
 	if (rc) {
 		dev_warn(&card->gdev->dev,
@@ -1229,7 +1189,7 @@
 		return -EOPNOTSUPP;
 	}
 
-	rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING,
+	rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
 					  IPA_CMD_ASS_START, 0);
 	if (rc) {
 		dev_warn(&card->gdev->dev,
@@ -1259,7 +1219,7 @@
 			QETH_CARD_IFNAME(card));
 		return rc;
 	}
-	rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6,
+	rc = qeth_send_simple_setassparms(card, IPA_IPV6,
 					  IPA_CMD_ASS_START, 3);
 	if (rc) {
 		dev_err(&card->gdev->dev,
@@ -1319,7 +1279,7 @@
 		rc = -EOPNOTSUPP;
 		goto out;
 	}
-	rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
+	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
 					  IPA_CMD_ASS_START, 0);
 	if (rc) {
 		dev_warn(&card->gdev->dev, "Enabling broadcast filtering for "
@@ -1327,7 +1287,7 @@
 		goto out;
 	}
 
-	rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
+	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
 					  IPA_CMD_ASS_CONFIGURE, 1);
 	if (rc) {
 		dev_warn(&card->gdev->dev,
@@ -1337,7 +1297,7 @@
 	}
 	card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
 	dev_info(&card->gdev->dev, "Broadcast enabled\n");
-	rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
+	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
 					  IPA_CMD_ASS_ENABLE, 1);
 	if (rc) {
 		dev_warn(&card->gdev->dev, "Setting up broadcast echo "
@@ -1353,84 +1313,18 @@
 	return rc;
 }
 
-static int qeth_l3_send_checksum_command(struct qeth_card *card)
-{
-	int rc;
-
-	rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
-					  IPA_CMD_ASS_START, 0);
-	if (rc) {
-		dev_warn(&card->gdev->dev, "Starting HW checksumming for %s "
-			"failed, using SW checksumming\n",
-			QETH_CARD_IFNAME(card));
-		return rc;
-	}
-	rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
-					  IPA_CMD_ASS_ENABLE,
-					  card->info.csum_mask);
-	if (rc) {
-		dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s "
-			"failed, using SW checksumming\n",
-			QETH_CARD_IFNAME(card));
-		return rc;
-	}
-	return 0;
-}
-
-static int qeth_l3_set_rx_csum(struct qeth_card *card, int on)
-{
-	int rc = 0;
-
-	if (on) {
-		rc = qeth_l3_send_checksum_command(card);
-		if (rc)
-			return -EIO;
-		dev_info(&card->gdev->dev,
-			"HW Checksumming (inbound) enabled\n");
-	} else {
-		rc = qeth_l3_send_simple_setassparms(card,
-			IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
-		if (rc)
-			return -EIO;
-	}
-
-	return 0;
-}
-
-static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
+static void qeth_l3_start_ipa_checksum(struct qeth_card *card)
 {
 	QETH_CARD_TEXT(card, 3, "strtcsum");
-
-	if (card->dev->features & NETIF_F_RXCSUM) {
-		rtnl_lock();
-		/* force set_features call */
-		card->dev->features &= ~NETIF_F_RXCSUM;
-		netdev_update_features(card->dev);
-		rtnl_unlock();
-	}
-	return 0;
+	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)
+	    && (card->dev->features & NETIF_F_RXCSUM))
+		qeth_set_rx_csum(card, 1);
 }
 
-static int qeth_l3_start_ipa_tx_checksum(struct qeth_card *card)
+static void qeth_l3_start_ipa_tx_checksum(struct qeth_card *card)
 {
-	int rc = 0;
-
-	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
-		return rc;
-	rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
-			  IPA_CMD_ASS_START, 0);
-	if (rc)
-		goto err_out;
-	rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
-			  IPA_CMD_ASS_ENABLE, card->info.tx_csum_mask);
-	if (rc)
-		goto err_out;
-	dev_info(&card->gdev->dev, "HW TX Checksumming enabled\n");
-	return rc;
-err_out:
-	dev_warn(&card->gdev->dev, "Enabling HW TX checksumming for %s "
-		"failed, using SW TX checksumming\n", QETH_CARD_IFNAME(card));
-	return rc;
+	QETH_CARD_TEXT(card, 3, "strttxcs");
+	qeth_start_ipa_tx_checksum(card);
 }
 
 static int qeth_l3_start_ipa_tso(struct qeth_card *card)
@@ -1445,8 +1339,8 @@
 			QETH_CARD_IFNAME(card));
 		rc = -EOPNOTSUPP;
 	} else {
-		rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
-						IPA_CMD_ASS_START, 0);
+		rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
+						  IPA_CMD_ASS_START, 0);
 		if (rc)
 			dev_warn(&card->gdev->dev, "Starting outbound TCP "
 				"segmentation offload for %s failed\n",
@@ -1950,7 +1844,6 @@
 			skb->ip_summed = CHECKSUM_NONE;
 	} else
 		skb->ip_summed = CHECKSUM_NONE;
-
 	return is_vlan;
 }
 
@@ -2287,7 +2180,7 @@
 	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
 		return -EOPNOTSUPP;
 	}
-	rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
+	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
 					  IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
 					  no_entries);
 	if (rc) {
@@ -2552,7 +2445,7 @@
 				       QETH_PROT_IPV4);
 	if (!iob)
 		return -ENOMEM;
-	rc = qeth_l3_send_setassparms(card, iob,
+	rc = qeth_send_setassparms(card, iob,
 				   sizeof(struct qeth_arp_cache_entry),
 				   (unsigned long) entry,
 				   qeth_l3_default_setassparms_cb, NULL);
@@ -2593,7 +2486,7 @@
 				       QETH_PROT_IPV4);
 	if (!iob)
 		return -ENOMEM;
-	rc = qeth_l3_send_setassparms(card, iob,
+	rc = qeth_send_setassparms(card, iob,
 				   12, (unsigned long)buf,
 				   qeth_l3_default_setassparms_cb, NULL);
 	if (rc) {
@@ -2624,7 +2517,7 @@
 	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
 		return -EOPNOTSUPP;
 	}
-	rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
+	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
 					  IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
 	if (rc) {
 		tmp = rc;
@@ -3187,7 +3080,6 @@
 		features &= ~NETIF_F_TSO;
 	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
 		features &= ~NETIF_F_RXCSUM;
-
 	return features;
 }
 
@@ -3204,7 +3096,7 @@
 	    card->state == CARD_STATE_RECOVER)
 		return 0;
 
-	return qeth_l3_set_rx_csum(card, features & NETIF_F_RXCSUM ? 1 : 0);
+	return qeth_set_rx_csum(card, features & NETIF_F_RXCSUM ? 1 : 0);
 }
 
 static const struct ethtool_ops qeth_l3_ethtool_ops = {
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index d8f990b..a851d34 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -49,7 +49,7 @@
 static LIST_HEAD(smsg_list);
 static int iucv_path_connected;
 
-static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
+static int smsg_path_pending(struct iucv_path *, u8 *, u8 *);
 static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
 
 static struct iucv_handler smsg_handler = {
@@ -57,8 +57,7 @@
 	.message_pending = smsg_message_pending,
 };
 
-static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
-			     u8 ipuser[16])
+static int smsg_path_pending(struct iucv_path *path, u8 *ipvmid, u8 *ipuser)
 {
 	if (strncmp(ipvmid, "*MSG    ", 8) != 0)
 		return -EINVAL;
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index f8d8fdb..e9fae30 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -400,12 +400,16 @@
 static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
 				   struct ccw1 *ccw, int index)
 {
+	int ret;
+
 	vcdev->config_block->index = index;
 	ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
 	ccw->flags = 0;
 	ccw->count = sizeof(struct vq_config_block);
 	ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
-	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
+	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
+	if (ret)
+		return ret;
 	return vcdev->config_block->num;
 }
 
@@ -503,6 +507,10 @@
 		goto out_err;
 	}
 	info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
+	if (info->num < 0) {
+		err = info->num;
+		goto out_err;
+	}
 	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
 	info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
 	if (info->queue == NULL) {
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 471d0879..1a8c9b5 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -172,6 +172,7 @@
 scsi_mod-$(CONFIG_SCSI_PROC_FS)	+= scsi_proc.o
 scsi_mod-y			+= scsi_trace.o scsi_logging.o
 scsi_mod-$(CONFIG_PM)		+= scsi_pm.o
+scsi_mod-$(CONFIG_SCSI_DH)	+= scsi_dh.o
 
 hv_storvsc-y			:= storvsc_drv.o
 
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index edb43fd..c831e30 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -983,7 +983,7 @@
 {
 	int err, i;
 	u32 offs, size;
-	struct asd_ll_el *el;
+	struct asd_ll_el *el = NULL;
 	struct asd_ctrla_phy_settings *ps;
 	struct asd_ctrla_phy_settings dflt_ps;
 
@@ -1004,6 +1004,7 @@
 
 		size = sizeof(struct asd_ctrla_phy_settings);
 		ps = &dflt_ps;
+		goto out_process;
 	}
 
 	if (size == 0)
@@ -1028,7 +1029,7 @@
 		ASD_DPRINTK("couldn't find ctrla phy settings struct\n");
 		goto out2;
 	}
-
+out_process:
 	err = asd_process_ctrla_phy_settings(asd_ha, ps);
 	if (err) {
 		ASD_DPRINTK("couldn't process ctrla phy settings\n");
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 315d6d6..98f7e8c 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3665,19 +3665,19 @@
 		if (sfp->state_query_cbfn)
 			sfp->state_query_cbfn(sfp->state_query_cbarg,
 					sfp->status);
-			sfp->media = NULL;
-		}
+		sfp->media = NULL;
+	}
 
-		if (sfp->portspeed) {
-			sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
-			if (sfp->state_query_cbfn)
-				sfp->state_query_cbfn(sfp->state_query_cbarg,
-						sfp->status);
-				sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
-		}
+	if (sfp->portspeed) {
+		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
+		if (sfp->state_query_cbfn)
+			sfp->state_query_cbfn(sfp->state_query_cbarg,
+					sfp->status);
+		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
+	}
 
-		sfp->state_query_lock = 0;
-		sfp->state_query_cbfn = NULL;
+	sfp->state_query_lock = 0;
+	sfp->state_query_cbfn = NULL;
 }
 
 /*
@@ -3878,7 +3878,7 @@
 		bfa_trc(sfp, sfp->data_valid);
 		if (sfp->data_valid) {
 			u32	size = sizeof(struct sfp_mem_s);
-			u8 *des = (u8 *) &(sfp->sfpmem);
+			u8 *des = (u8 *)(sfp->sfpmem);
 			memcpy(des, sfp->dbuf_kva, size);
 		}
 		/*
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index 69abd0a..e5647d5 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -3,7 +3,7 @@
 #
 
 menuconfig SCSI_DH
-	tristate "SCSI Device Handlers"
+	bool "SCSI Device Handlers"
 	depends on SCSI
 	default n
 	help
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
index e1d2ea0..09866c5 100644
--- a/drivers/scsi/device_handler/Makefile
+++ b/drivers/scsi/device_handler/Makefile
@@ -1,7 +1,6 @@
 #
 # SCSI Device Handler
 #
-obj-$(CONFIG_SCSI_DH)		+= scsi_dh.o
 obj-$(CONFIG_SCSI_DH_RDAC)	+= scsi_dh_rdac.o
 obj-$(CONFIG_SCSI_DH_HP_SW)	+= scsi_dh_hp_sw.o
 obj-$(CONFIG_SCSI_DH_EMC)	+= scsi_dh_emc.o
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
deleted file mode 100644
index 1efebc9..0000000
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ /dev/null
@@ -1,621 +0,0 @@
-/*
- * SCSI device handler infrastruture.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2007
- *      Authors:
- *               Chandra Seetharaman <sekharan@us.ibm.com>
- *               Mike Anderson <andmike@linux.vnet.ibm.com>
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <scsi/scsi_dh.h>
-#include "../scsi_priv.h"
-
-static DEFINE_SPINLOCK(list_lock);
-static LIST_HEAD(scsi_dh_list);
-
-static struct scsi_device_handler *get_device_handler(const char *name)
-{
-	struct scsi_device_handler *tmp, *found = NULL;
-
-	spin_lock(&list_lock);
-	list_for_each_entry(tmp, &scsi_dh_list, list) {
-		if (!strncmp(tmp->name, name, strlen(tmp->name))) {
-			found = tmp;
-			break;
-		}
-	}
-	spin_unlock(&list_lock);
-	return found;
-}
-
-/*
- * device_handler_match_function - Match a device handler to a device
- * @sdev - SCSI device to be tested
- *
- * Tests @sdev against the match function of all registered device_handler.
- * Returns the found device handler or NULL if not found.
- */
-static struct scsi_device_handler *
-device_handler_match_function(struct scsi_device *sdev)
-{
-	struct scsi_device_handler *tmp_dh, *found_dh = NULL;
-
-	spin_lock(&list_lock);
-	list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
-		if (tmp_dh->match && tmp_dh->match(sdev)) {
-			found_dh = tmp_dh;
-			break;
-		}
-	}
-	spin_unlock(&list_lock);
-	return found_dh;
-}
-
-/*
- * device_handler_match - Attach a device handler to a device
- * @scsi_dh - The device handler to match against or NULL
- * @sdev - SCSI device to be tested against @scsi_dh
- *
- * Tests @sdev against the device handler @scsi_dh or against
- * all registered device_handler if @scsi_dh == NULL.
- * Returns the found device handler or NULL if not found.
- */
-static struct scsi_device_handler *
-device_handler_match(struct scsi_device_handler *scsi_dh,
-		     struct scsi_device *sdev)
-{
-	struct scsi_device_handler *found_dh;
-
-	found_dh = device_handler_match_function(sdev);
-
-	if (scsi_dh && found_dh != scsi_dh)
-		found_dh = NULL;
-
-	return found_dh;
-}
-
-/*
- * scsi_dh_handler_attach - Attach a device handler to a device
- * @sdev - SCSI device the device handler should attach to
- * @scsi_dh - The device handler to attach
- */
-static int scsi_dh_handler_attach(struct scsi_device *sdev,
-				  struct scsi_device_handler *scsi_dh)
-{
-	struct scsi_dh_data *d;
-
-	if (sdev->scsi_dh_data) {
-		if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
-			return -EBUSY;
-
-		kref_get(&sdev->scsi_dh_data->kref);
-		return 0;
-	}
-
-	if (!try_module_get(scsi_dh->module))
-		return -EINVAL;
-
-	d = scsi_dh->attach(sdev);
-	if (IS_ERR(d)) {
-		sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%ld)\n",
-			    scsi_dh->name, PTR_ERR(d));
-		module_put(scsi_dh->module);
-		return PTR_ERR(d);
-	}
-
-	d->scsi_dh = scsi_dh;
-	kref_init(&d->kref);
-	d->sdev = sdev;
-
-	spin_lock_irq(sdev->request_queue->queue_lock);
-	sdev->scsi_dh_data = d;
-	spin_unlock_irq(sdev->request_queue->queue_lock);
-	return 0;
-}
-
-static void __detach_handler (struct kref *kref)
-{
-	struct scsi_dh_data *scsi_dh_data =
-		container_of(kref, struct scsi_dh_data, kref);
-	struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh;
-	struct scsi_device *sdev = scsi_dh_data->sdev;
-
-	scsi_dh->detach(sdev);
-
-	spin_lock_irq(sdev->request_queue->queue_lock);
-	sdev->scsi_dh_data = NULL;
-	spin_unlock_irq(sdev->request_queue->queue_lock);
-
-	sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name);
-	module_put(scsi_dh->module);
-}
-
-/*
- * scsi_dh_handler_detach - Detach a device handler from a device
- * @sdev - SCSI device the device handler should be detached from
- * @scsi_dh - Device handler to be detached
- *
- * Detach from a device handler. If a device handler is specified,
- * only detach if the currently attached handler matches @scsi_dh.
- */
-static void scsi_dh_handler_detach(struct scsi_device *sdev,
-				   struct scsi_device_handler *scsi_dh)
-{
-	if (!sdev->scsi_dh_data)
-		return;
-
-	if (scsi_dh && scsi_dh != sdev->scsi_dh_data->scsi_dh)
-		return;
-
-	if (!scsi_dh)
-		scsi_dh = sdev->scsi_dh_data->scsi_dh;
-
-	if (scsi_dh)
-		kref_put(&sdev->scsi_dh_data->kref, __detach_handler);
-}
-
-/*
- * Functions for sysfs attribute 'dh_state'
- */
-static ssize_t
-store_dh_state(struct device *dev, struct device_attribute *attr,
-	       const char *buf, size_t count)
-{
-	struct scsi_device *sdev = to_scsi_device(dev);
-	struct scsi_device_handler *scsi_dh;
-	int err = -EINVAL;
-
-	if (sdev->sdev_state == SDEV_CANCEL ||
-	    sdev->sdev_state == SDEV_DEL)
-		return -ENODEV;
-
-	if (!sdev->scsi_dh_data) {
-		/*
-		 * Attach to a device handler
-		 */
-		if (!(scsi_dh = get_device_handler(buf)))
-			return err;
-		err = scsi_dh_handler_attach(sdev, scsi_dh);
-	} else {
-		scsi_dh = sdev->scsi_dh_data->scsi_dh;
-		if (!strncmp(buf, "detach", 6)) {
-			/*
-			 * Detach from a device handler
-			 */
-			scsi_dh_handler_detach(sdev, scsi_dh);
-			err = 0;
-		} else if (!strncmp(buf, "activate", 8)) {
-			/*
-			 * Activate a device handler
-			 */
-			if (scsi_dh->activate)
-				err = scsi_dh->activate(sdev, NULL, NULL);
-			else
-				err = 0;
-		}
-	}
-
-	return err<0?err:count;
-}
-
-static ssize_t
-show_dh_state(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct scsi_device *sdev = to_scsi_device(dev);
-
-	if (!sdev->scsi_dh_data)
-		return snprintf(buf, 20, "detached\n");
-
-	return snprintf(buf, 20, "%s\n", sdev->scsi_dh_data->scsi_dh->name);
-}
-
-static struct device_attribute scsi_dh_state_attr =
-	__ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state,
-	       store_dh_state);
-
-/*
- * scsi_dh_sysfs_attr_add - Callback for scsi_init_dh
- */
-static int scsi_dh_sysfs_attr_add(struct device *dev, void *data)
-{
-	struct scsi_device *sdev;
-	int err;
-
-	if (!scsi_is_sdev_device(dev))
-		return 0;
-
-	sdev = to_scsi_device(dev);
-
-	err = device_create_file(&sdev->sdev_gendev,
-				 &scsi_dh_state_attr);
-
-	return 0;
-}
-
-/*
- * scsi_dh_sysfs_attr_remove - Callback for scsi_exit_dh
- */
-static int scsi_dh_sysfs_attr_remove(struct device *dev, void *data)
-{
-	struct scsi_device *sdev;
-
-	if (!scsi_is_sdev_device(dev))
-		return 0;
-
-	sdev = to_scsi_device(dev);
-
-	device_remove_file(&sdev->sdev_gendev,
-			   &scsi_dh_state_attr);
-
-	return 0;
-}
-
-/*
- * scsi_dh_notifier - notifier chain callback
- */
-static int scsi_dh_notifier(struct notifier_block *nb,
-			    unsigned long action, void *data)
-{
-	struct device *dev = data;
-	struct scsi_device *sdev;
-	int err = 0;
-	struct scsi_device_handler *devinfo = NULL;
-
-	if (!scsi_is_sdev_device(dev))
-		return 0;
-
-	sdev = to_scsi_device(dev);
-
-	if (action == BUS_NOTIFY_ADD_DEVICE) {
-		err = device_create_file(dev, &scsi_dh_state_attr);
-		/* don't care about err */
-		devinfo = device_handler_match(NULL, sdev);
-		if (devinfo)
-			err = scsi_dh_handler_attach(sdev, devinfo);
-	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
-		device_remove_file(dev, &scsi_dh_state_attr);
-		scsi_dh_handler_detach(sdev, NULL);
-	}
-	return err;
-}
-
-/*
- * scsi_dh_notifier_add - Callback for scsi_register_device_handler
- */
-static int scsi_dh_notifier_add(struct device *dev, void *data)
-{
-	struct scsi_device_handler *scsi_dh = data;
-	struct scsi_device *sdev;
-
-	if (!scsi_is_sdev_device(dev))
-		return 0;
-
-	if (!get_device(dev))
-		return 0;
-
-	sdev = to_scsi_device(dev);
-
-	if (device_handler_match(scsi_dh, sdev))
-		scsi_dh_handler_attach(sdev, scsi_dh);
-
-	put_device(dev);
-
-	return 0;
-}
-
-/*
- * scsi_dh_notifier_remove - Callback for scsi_unregister_device_handler
- */
-static int scsi_dh_notifier_remove(struct device *dev, void *data)
-{
-	struct scsi_device_handler *scsi_dh = data;
-	struct scsi_device *sdev;
-
-	if (!scsi_is_sdev_device(dev))
-		return 0;
-
-	if (!get_device(dev))
-		return 0;
-
-	sdev = to_scsi_device(dev);
-
-	scsi_dh_handler_detach(sdev, scsi_dh);
-
-	put_device(dev);
-
-	return 0;
-}
-
-/*
- * scsi_register_device_handler - register a device handler personality
- *      module.
- * @scsi_dh - device handler to be registered.
- *
- * Returns 0 on success, -EBUSY if handler already registered.
- */
-int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
-{
-
-	if (get_device_handler(scsi_dh->name))
-		return -EBUSY;
-
-	if (!scsi_dh->attach || !scsi_dh->detach)
-		return -EINVAL;
-
-	spin_lock(&list_lock);
-	list_add(&scsi_dh->list, &scsi_dh_list);
-	spin_unlock(&list_lock);
-
-	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
-	printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
-
-	return SCSI_DH_OK;
-}
-EXPORT_SYMBOL_GPL(scsi_register_device_handler);
-
-/*
- * scsi_unregister_device_handler - register a device handler personality
- *      module.
- * @scsi_dh - device handler to be unregistered.
- *
- * Returns 0 on success, -ENODEV if handler not registered.
- */
-int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
-{
-
-	if (!get_device_handler(scsi_dh->name))
-		return -ENODEV;
-
-	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
-			 scsi_dh_notifier_remove);
-
-	spin_lock(&list_lock);
-	list_del(&scsi_dh->list);
-	spin_unlock(&list_lock);
-	printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
-
-	return SCSI_DH_OK;
-}
-EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
-
-/*
- * scsi_dh_activate - activate the path associated with the scsi_device
- *      corresponding to the given request queue.
- *     Returns immediately without waiting for activation to be completed.
- * @q    - Request queue that is associated with the scsi_device to be
- *         activated.
- * @fn   - Function to be called upon completion of the activation.
- *         Function fn is called with data (below) and the error code.
- *         Function fn may be called from the same calling context. So,
- *         do not hold the lock in the caller which may be needed in fn.
- * @data - data passed to the function fn upon completion.
- *
- */
-int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
-{
-	int err = 0;
-	unsigned long flags;
-	struct scsi_device *sdev;
-	struct scsi_device_handler *scsi_dh = NULL;
-	struct device *dev = NULL;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	sdev = q->queuedata;
-	if (!sdev) {
-		spin_unlock_irqrestore(q->queue_lock, flags);
-		err = SCSI_DH_NOSYS;
-		if (fn)
-			fn(data, err);
-		return err;
-	}
-
-	if (sdev->scsi_dh_data)
-		scsi_dh = sdev->scsi_dh_data->scsi_dh;
-	dev = get_device(&sdev->sdev_gendev);
-	if (!scsi_dh || !dev ||
-	    sdev->sdev_state == SDEV_CANCEL ||
-	    sdev->sdev_state == SDEV_DEL)
-		err = SCSI_DH_NOSYS;
-	if (sdev->sdev_state == SDEV_OFFLINE)
-		err = SCSI_DH_DEV_OFFLINED;
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	if (err) {
-		if (fn)
-			fn(data, err);
-		goto out;
-	}
-
-	if (scsi_dh->activate)
-		err = scsi_dh->activate(sdev, fn, data);
-out:
-	put_device(dev);
-	return err;
-}
-EXPORT_SYMBOL_GPL(scsi_dh_activate);
-
-/*
- * scsi_dh_set_params - set the parameters for the device as per the
- *      string specified in params.
- * @q - Request queue that is associated with the scsi_device for
- *      which the parameters to be set.
- * @params - parameters in the following format
- *      "no_of_params\0param1\0param2\0param3\0...\0"
- *      for example, string for 2 parameters with value 10 and 21
- *      is specified as "2\010\021\0".
- */
-int scsi_dh_set_params(struct request_queue *q, const char *params)
-{
-	int err = -SCSI_DH_NOSYS;
-	unsigned long flags;
-	struct scsi_device *sdev;
-	struct scsi_device_handler *scsi_dh = NULL;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	sdev = q->queuedata;
-	if (sdev && sdev->scsi_dh_data)
-		scsi_dh = sdev->scsi_dh_data->scsi_dh;
-	if (scsi_dh && scsi_dh->set_params && get_device(&sdev->sdev_gendev))
-		err = 0;
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	if (err)
-		return err;
-	err = scsi_dh->set_params(sdev, params);
-	put_device(&sdev->sdev_gendev);
-	return err;
-}
-EXPORT_SYMBOL_GPL(scsi_dh_set_params);
-
-/*
- * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for
- *	the given name. FALSE(0) otherwise.
- * @name - name of the device handler.
- */
-int scsi_dh_handler_exist(const char *name)
-{
-	return (get_device_handler(name) != NULL);
-}
-EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
-
-/*
- * scsi_dh_attach - Attach device handler
- * @q - Request queue that is associated with the scsi_device
- *      the handler should be attached to
- * @name - name of the handler to attach
- */
-int scsi_dh_attach(struct request_queue *q, const char *name)
-{
-	unsigned long flags;
-	struct scsi_device *sdev;
-	struct scsi_device_handler *scsi_dh;
-	int err = 0;
-
-	scsi_dh = get_device_handler(name);
-	if (!scsi_dh)
-		return -EINVAL;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	sdev = q->queuedata;
-	if (!sdev || !get_device(&sdev->sdev_gendev))
-		err = -ENODEV;
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	if (!err) {
-		err = scsi_dh_handler_attach(sdev, scsi_dh);
-		put_device(&sdev->sdev_gendev);
-	}
-	return err;
-}
-EXPORT_SYMBOL_GPL(scsi_dh_attach);
-
-/*
- * scsi_dh_detach - Detach device handler
- * @q - Request queue that is associated with the scsi_device
- *      the handler should be detached from
- *
- * This function will detach the device handler only
- * if the sdev is not part of the internal list, ie
- * if it has been attached manually.
- */
-void scsi_dh_detach(struct request_queue *q)
-{
-	unsigned long flags;
-	struct scsi_device *sdev;
-	struct scsi_device_handler *scsi_dh = NULL;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	sdev = q->queuedata;
-	if (!sdev || !get_device(&sdev->sdev_gendev))
-		sdev = NULL;
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	if (!sdev)
-		return;
-
-	if (sdev->scsi_dh_data) {
-		scsi_dh = sdev->scsi_dh_data->scsi_dh;
-		scsi_dh_handler_detach(sdev, scsi_dh);
-	}
-	put_device(&sdev->sdev_gendev);
-}
-EXPORT_SYMBOL_GPL(scsi_dh_detach);
-
-/*
- * scsi_dh_attached_handler_name - Get attached device handler's name
- * @q - Request queue that is associated with the scsi_device
- *      that may have a device handler attached
- * @gfp - the GFP mask used in the kmalloc() call when allocating memory
- *
- * Returns name of attached handler, NULL if no handler is attached.
- * Caller must take care to free the returned string.
- */
-const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
-{
-	unsigned long flags;
-	struct scsi_device *sdev;
-	const char *handler_name = NULL;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	sdev = q->queuedata;
-	if (!sdev || !get_device(&sdev->sdev_gendev))
-		sdev = NULL;
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	if (!sdev)
-		return NULL;
-
-	if (sdev->scsi_dh_data)
-		handler_name = kstrdup(sdev->scsi_dh_data->scsi_dh->name, gfp);
-
-	put_device(&sdev->sdev_gendev);
-	return handler_name;
-}
-EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name);
-
-static struct notifier_block scsi_dh_nb = {
-	.notifier_call = scsi_dh_notifier
-};
-
-static int __init scsi_dh_init(void)
-{
-	int r;
-
-	r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
-
-	if (!r)
-		bus_for_each_dev(&scsi_bus_type, NULL, NULL,
-				 scsi_dh_sysfs_attr_add);
-
-	return r;
-}
-
-static void __exit scsi_dh_exit(void)
-{
-	bus_for_each_dev(&scsi_bus_type, NULL, NULL,
-			 scsi_dh_sysfs_attr_remove);
-	bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
-}
-
-module_init(scsi_dh_init);
-module_exit(scsi_dh_exit);
-
-MODULE_DESCRIPTION("SCSI device handler");
-MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 854b568..cc2773b 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -62,7 +62,6 @@
 #define ALUA_OPTIMIZE_STPG		1
 
 struct alua_dh_data {
-	struct scsi_dh_data	dh_data;
 	int			group_id;
 	int			rel_port;
 	int			tpgs;
@@ -86,11 +85,6 @@
 static char print_alua_state(int);
 static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *);
 
-static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev)
-{
-	return container_of(sdev->scsi_dh_data, struct alua_dh_data, dh_data);
-}
-
 static int realloc_buffer(struct alua_dh_data *h, unsigned len)
 {
 	if (h->buff && h->buff != h->inq)
@@ -708,7 +702,7 @@
  */
 static int alua_set_params(struct scsi_device *sdev, const char *params)
 {
-	struct alua_dh_data *h = get_alua_data(sdev);
+	struct alua_dh_data *h = sdev->handler_data;
 	unsigned int optimize = 0, argc;
 	const char *p = params;
 	int result = SCSI_DH_OK;
@@ -746,7 +740,7 @@
 static int alua_activate(struct scsi_device *sdev,
 			activate_complete fn, void *data)
 {
-	struct alua_dh_data *h = get_alua_data(sdev);
+	struct alua_dh_data *h = sdev->handler_data;
 	int err = SCSI_DH_OK;
 	int stpg = 0;
 
@@ -804,7 +798,7 @@
  */
 static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
 {
-	struct alua_dh_data *h = get_alua_data(sdev);
+	struct alua_dh_data *h = sdev->handler_data;
 	int ret = BLKPREP_OK;
 
 	if (h->state == TPGS_STATE_TRANSITIONING)
@@ -819,23 +813,18 @@
 
 }
 
-static bool alua_match(struct scsi_device *sdev)
-{
-	return (scsi_device_tpgs(sdev) != 0);
-}
-
 /*
  * alua_bus_attach - Attach device handler
  * @sdev: device to be attached to
  */
-static struct scsi_dh_data *alua_bus_attach(struct scsi_device *sdev)
+static int alua_bus_attach(struct scsi_device *sdev)
 {
 	struct alua_dh_data *h;
 	int err;
 
 	h = kzalloc(sizeof(*h) , GFP_KERNEL);
 	if (!h)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 	h->tpgs = TPGS_MODE_UNINITIALIZED;
 	h->state = TPGS_STATE_OPTIMIZED;
 	h->group_id = -1;
@@ -848,11 +837,11 @@
 	if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
 		goto failed;
 
-	sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME);
-	return &h->dh_data;
+	sdev->handler_data = h;
+	return 0;
 failed:
 	kfree(h);
-	return ERR_PTR(-EINVAL);
+	return -EINVAL;
 }
 
 /*
@@ -861,10 +850,11 @@
  */
 static void alua_bus_detach(struct scsi_device *sdev)
 {
-	struct alua_dh_data *h = get_alua_data(sdev);
+	struct alua_dh_data *h = sdev->handler_data;
 
 	if (h->buff && h->inq != h->buff)
 		kfree(h->buff);
+	sdev->handler_data = NULL;
 	kfree(h);
 }
 
@@ -877,7 +867,6 @@
 	.check_sense = alua_check_sense,
 	.activate = alua_activate,
 	.set_params = alua_set_params,
-	.match = alua_match,
 };
 
 static int __init alua_init(void)
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 6ed1caa..e6fb97c 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -72,7 +72,6 @@
 };
 
 struct clariion_dh_data {
-	struct scsi_dh_data dh_data;
 	/*
 	 * Flags:
 	 *  CLARIION_SHORT_TRESPASS
@@ -114,13 +113,6 @@
 	int current_sp;
 };
 
-static inline struct clariion_dh_data
-			*get_clariion_data(struct scsi_device *sdev)
-{
-	return container_of(sdev->scsi_dh_data, struct clariion_dh_data,
-			dh_data);
-}
-
 /*
  * Parse MODE_SELECT cmd reply.
  */
@@ -450,7 +442,7 @@
 
 static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
 {
-	struct clariion_dh_data *h = get_clariion_data(sdev);
+	struct clariion_dh_data *h = sdev->handler_data;
 	int ret = BLKPREP_OK;
 
 	if (h->lun_state != CLARIION_LUN_OWNED) {
@@ -533,7 +525,7 @@
 static int clariion_activate(struct scsi_device *sdev,
 				activate_complete fn, void *data)
 {
-	struct clariion_dh_data *csdev = get_clariion_data(sdev);
+	struct clariion_dh_data *csdev = sdev->handler_data;
 	int result;
 
 	result = clariion_send_inquiry(sdev, csdev);
@@ -574,7 +566,7 @@
  */
 static int clariion_set_params(struct scsi_device *sdev, const char *params)
 {
-	struct clariion_dh_data *csdev = get_clariion_data(sdev);
+	struct clariion_dh_data *csdev = sdev->handler_data;
 	unsigned int hr = 0, st = 0, argc;
 	const char *p = params;
 	int result = SCSI_DH_OK;
@@ -622,42 +614,14 @@
 	return result;
 }
 
-static const struct {
-	char *vendor;
-	char *model;
-} clariion_dev_list[] = {
-	{"DGC", "RAID"},
-	{"DGC", "DISK"},
-	{"DGC", "VRAID"},
-	{NULL, NULL},
-};
-
-static bool clariion_match(struct scsi_device *sdev)
-{
-	int i;
-
-	if (scsi_device_tpgs(sdev))
-		return false;
-
-	for (i = 0; clariion_dev_list[i].vendor; i++) {
-		if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
-			strlen(clariion_dev_list[i].vendor)) &&
-		    !strncmp(sdev->model, clariion_dev_list[i].model,
-			strlen(clariion_dev_list[i].model))) {
-			return true;
-		}
-	}
-	return false;
-}
-
-static struct scsi_dh_data *clariion_bus_attach(struct scsi_device *sdev)
+static int clariion_bus_attach(struct scsi_device *sdev)
 {
 	struct clariion_dh_data *h;
 	int err;
 
 	h = kzalloc(sizeof(*h) , GFP_KERNEL);
 	if (!h)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 	h->lun_state = CLARIION_LUN_UNINITIALIZED;
 	h->default_sp = CLARIION_UNBOUND_LU;
 	h->current_sp = CLARIION_UNBOUND_LU;
@@ -675,18 +639,19 @@
 		    CLARIION_NAME, h->current_sp + 'A',
 		    h->port, lun_state[h->lun_state],
 		    h->default_sp + 'A');
-	return &h->dh_data;
+
+	sdev->handler_data = h;
+	return 0;
 
 failed:
 	kfree(h);
-	return ERR_PTR(-EINVAL);
+	return -EINVAL;
 }
 
 static void clariion_bus_detach(struct scsi_device *sdev)
 {
-	struct clariion_dh_data *h = get_clariion_data(sdev);
-
-	kfree(h);
+	kfree(sdev->handler_data);
+	sdev->handler_data = NULL;
 }
 
 static struct scsi_device_handler clariion_dh = {
@@ -698,7 +663,6 @@
 	.activate	= clariion_activate,
 	.prep_fn	= clariion_prep_fn,
 	.set_params	= clariion_set_params,
-	.match		= clariion_match,
 };
 
 static int __init clariion_init(void)
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 485d995..9406d5f 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -38,7 +38,6 @@
 #define HP_SW_PATH_PASSIVE		1
 
 struct hp_sw_dh_data {
-	struct scsi_dh_data dh_data;
 	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
 	int path_state;
 	int retries;
@@ -50,11 +49,6 @@
 
 static int hp_sw_start_stop(struct hp_sw_dh_data *);
 
-static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
-{
-	return container_of(sdev->scsi_dh_data, struct hp_sw_dh_data, dh_data);
-}
-
 /*
  * tur_done - Handle TEST UNIT READY return status
  * @sdev: sdev the command has been sent to
@@ -267,7 +261,7 @@
 
 static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
 {
-	struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+	struct hp_sw_dh_data *h = sdev->handler_data;
 	int ret = BLKPREP_OK;
 
 	if (h->path_state != HP_SW_PATH_ACTIVE) {
@@ -292,7 +286,7 @@
 				activate_complete fn, void *data)
 {
 	int ret = SCSI_DH_OK;
-	struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+	struct hp_sw_dh_data *h = sdev->handler_data;
 
 	ret = hp_sw_tur(sdev, h);
 
@@ -311,43 +305,14 @@
 	return 0;
 }
 
-static const struct {
-	char *vendor;
-	char *model;
-} hp_sw_dh_data_list[] = {
-	{"COMPAQ", "MSA1000 VOLUME"},
-	{"COMPAQ", "HSV110"},
-	{"HP", "HSV100"},
-	{"DEC", "HSG80"},
-	{NULL, NULL},
-};
-
-static bool hp_sw_match(struct scsi_device *sdev)
-{
-	int i;
-
-	if (scsi_device_tpgs(sdev))
-		return false;
-
-	for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
-		if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
-			strlen(hp_sw_dh_data_list[i].vendor)) &&
-		    !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
-			strlen(hp_sw_dh_data_list[i].model))) {
-			return true;
-		}
-	}
-	return false;
-}
-
-static struct scsi_dh_data *hp_sw_bus_attach(struct scsi_device *sdev)
+static int hp_sw_bus_attach(struct scsi_device *sdev)
 {
 	struct hp_sw_dh_data *h;
 	int ret;
 
 	h = kzalloc(sizeof(*h), GFP_KERNEL);
 	if (!h)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 	h->path_state = HP_SW_PATH_UNINITIALIZED;
 	h->retries = HP_SW_RETRIES;
 	h->sdev = sdev;
@@ -359,17 +324,18 @@
 	sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
 		    HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
 		    "active":"passive");
-	return &h->dh_data;
+
+	sdev->handler_data = h;
+	return 0;
 failed:
 	kfree(h);
-	return ERR_PTR(-EINVAL);
+	return -EINVAL;
 }
 
 static void hp_sw_bus_detach( struct scsi_device *sdev )
 {
-	struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
-
-	kfree(h);
+	kfree(sdev->handler_data);
+	sdev->handler_data = NULL;
 }
 
 static struct scsi_device_handler hp_sw_dh = {
@@ -379,7 +345,6 @@
 	.detach		= hp_sw_bus_detach,
 	.activate	= hp_sw_activate,
 	.prep_fn	= hp_sw_prep_fn,
-	.match		= hp_sw_match,
 };
 
 static int __init hp_sw_init(void)
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index b46ace3..3613581 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -181,7 +181,6 @@
 };
 
 struct rdac_dh_data {
-	struct scsi_dh_data	dh_data;
 	struct rdac_controller	*ctlr;
 #define UNINITIALIZED_LUN	(1 << 8)
 	unsigned		lun;
@@ -260,11 +259,6 @@
 		sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
 } while (0);
 
-static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
-{
-	return container_of(sdev->scsi_dh_data, struct rdac_dh_data, dh_data);
-}
-
 static struct request *get_rdac_req(struct scsi_device *sdev,
 			void *buffer, unsigned buflen, int rw)
 {
@@ -544,7 +538,7 @@
 {
 	struct scsi_sense_hdr sense_hdr;
 	int err = SCSI_DH_IO, ret;
-	struct rdac_dh_data *h = get_rdac_data(sdev);
+	struct rdac_dh_data *h = sdev->handler_data;
 
 	ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
 	if (!ret)
@@ -589,7 +583,7 @@
 		container_of(work, struct rdac_controller, ms_work);
 	struct request *rq;
 	struct scsi_device *sdev = ctlr->ms_sdev;
-	struct rdac_dh_data *h = get_rdac_data(sdev);
+	struct rdac_dh_data *h = sdev->handler_data;
 	struct request_queue *q = sdev->request_queue;
 	int err, retry_cnt = RDAC_RETRY_COUNT;
 	struct rdac_queue_data *tmp, *qdata;
@@ -648,7 +642,7 @@
 	if (!qdata)
 		return SCSI_DH_RETRY;
 
-	qdata->h = get_rdac_data(sdev);
+	qdata->h = sdev->handler_data;
 	qdata->callback_fn = fn;
 	qdata->callback_data = data;
 
@@ -667,7 +661,7 @@
 static int rdac_activate(struct scsi_device *sdev,
 			activate_complete fn, void *data)
 {
-	struct rdac_dh_data *h = get_rdac_data(sdev);
+	struct rdac_dh_data *h = sdev->handler_data;
 	int err = SCSI_DH_OK;
 	int act = 0;
 
@@ -702,7 +696,7 @@
 
 static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
 {
-	struct rdac_dh_data *h = get_rdac_data(sdev);
+	struct rdac_dh_data *h = sdev->handler_data;
 	int ret = BLKPREP_OK;
 
 	if (h->state != RDAC_STATE_ACTIVE) {
@@ -716,7 +710,7 @@
 static int rdac_check_sense(struct scsi_device *sdev,
 				struct scsi_sense_hdr *sense_hdr)
 {
-	struct rdac_dh_data *h = get_rdac_data(sdev);
+	struct rdac_dh_data *h = sdev->handler_data;
 
 	RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
 			"I/O returned with sense %02x/%02x/%02x",
@@ -778,56 +772,7 @@
 	return SCSI_RETURN_NOT_HANDLED;
 }
 
-static const struct {
-	char *vendor;
-	char *model;
-} rdac_dev_list[] = {
-	{"IBM", "1722"},
-	{"IBM", "1724"},
-	{"IBM", "1726"},
-	{"IBM", "1742"},
-	{"IBM", "1745"},
-	{"IBM", "1746"},
-	{"IBM", "1813"},
-	{"IBM", "1814"},
-	{"IBM", "1815"},
-	{"IBM", "1818"},
-	{"IBM", "3526"},
-	{"SGI", "TP9"},
-	{"SGI", "IS"},
-	{"STK", "OPENstorage D280"},
-	{"STK", "FLEXLINE 380"},
-	{"SUN", "CSM"},
-	{"SUN", "LCSM100"},
-	{"SUN", "STK6580_6780"},
-	{"SUN", "SUN_6180"},
-	{"SUN", "ArrayStorage"},
-	{"DELL", "MD3"},
-	{"NETAPP", "INF-01-00"},
-	{"LSI", "INF-01-00"},
-	{"ENGENIO", "INF-01-00"},
-	{NULL, NULL},
-};
-
-static bool rdac_match(struct scsi_device *sdev)
-{
-	int i;
-
-	if (scsi_device_tpgs(sdev))
-		return false;
-
-	for (i = 0; rdac_dev_list[i].vendor; i++) {
-		if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
-			strlen(rdac_dev_list[i].vendor)) &&
-		    !strncmp(sdev->model, rdac_dev_list[i].model,
-			strlen(rdac_dev_list[i].model))) {
-			return true;
-		}
-	}
-	return false;
-}
-
-static struct scsi_dh_data *rdac_bus_attach(struct scsi_device *sdev)
+static int rdac_bus_attach(struct scsi_device *sdev)
 {
 	struct rdac_dh_data *h;
 	int err;
@@ -836,7 +781,7 @@
 
 	h = kzalloc(sizeof(*h) , GFP_KERNEL);
 	if (!h)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 	h->lun = UNINITIALIZED_LUN;
 	h->state = RDAC_STATE_ACTIVE;
 
@@ -861,7 +806,8 @@
 		    RDAC_NAME, h->lun, mode[(int)h->mode],
 		    lun_state[(int)h->lun_state]);
 
-	return &h->dh_data;
+	sdev->handler_data = h;
+	return 0;
 
 clean_ctlr:
 	spin_lock(&list_lock);
@@ -870,12 +816,12 @@
 
 failed:
 	kfree(h);
-	return ERR_PTR(-EINVAL);
+	return -EINVAL;
 }
 
 static void rdac_bus_detach( struct scsi_device *sdev )
 {
-	struct rdac_dh_data *h = get_rdac_data(sdev);
+	struct rdac_dh_data *h = sdev->handler_data;
 
 	if (h->ctlr && h->ctlr->ms_queued)
 		flush_workqueue(kmpath_rdacd);
@@ -884,6 +830,7 @@
 	if (h->ctlr)
 		kref_put(&h->ctlr->kref, release_controller);
 	spin_unlock(&list_lock);
+	sdev->handler_data = NULL;
 	kfree(h);
 }
 
@@ -895,7 +842,6 @@
 	.attach = rdac_bus_attach,
 	.detach = rdac_bus_detach,
 	.activate = rdac_activate,
-	.match = rdac_match,
 };
 
 static int __init rdac_init(void)
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index ec193a8..d3eb80c 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -364,7 +364,7 @@
 	 * on the ethertype for the given device
 	 */
 	fcoe->fcoe_packet_type.func = fcoe_rcv;
-	fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+	fcoe->fcoe_packet_type.type = htons(ETH_P_FCOE);
 	fcoe->fcoe_packet_type.dev = netdev;
 	dev_add_pack(&fcoe->fcoe_packet_type);
 
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3411919..b62836d 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4555,7 +4555,7 @@
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 	res = (struct ipr_resource_entry *)sdev->hostdata;
 	if (res) {
-		if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
+		if (ipr_is_af_dasd_device(res)) {
 			res->raw_mode = simple_strtoul(buf, NULL, 10);
 			len = strlen(buf);
 			if (res->sdev)
@@ -6383,9 +6383,13 @@
 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 	}
-	if (res->raw_mode && ipr_is_af_dasd_device(res))
+	if (res->raw_mode && ipr_is_af_dasd_device(res)) {
 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
 
+		if (scsi_cmd->underflow == 0)
+			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
+	}
+
 	if (ioa_cfg->sis64)
 		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
 	else
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 98d9bb6..33c74d3 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -853,12 +853,9 @@
 				     SAM_STAT_CHECK_CONDITION;
 			scsi_build_sense_buffer(1, sc->sense_buffer,
 						ILLEGAL_REQUEST, 0x10, ascq);
-			sc->sense_buffer[7] = 0xc; /* Additional sense length */
-			sc->sense_buffer[8] = 0;   /* Information desc type */
-			sc->sense_buffer[9] = 0xa; /* Additional desc length */
-			sc->sense_buffer[10] = 0x80; /* Validity bit */
-
-			put_unaligned_be64(sector, &sc->sense_buffer[12]);
+			scsi_set_sense_information(sc->sense_buffer,
+						   SCSI_SENSE_BUFFERSIZE,
+						   sector);
 			goto out;
 		}
 	}
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index eb62772..4abb93a 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2284,7 +2284,7 @@
 			(struct lpfc_rdp_context *)(mbox->context2);
 
 	if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
-		goto error;
+		goto error_mbuf_free;
 
 	lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
 				DMP_SFF_PAGE_A2_SIZE);
@@ -2299,13 +2299,14 @@
 	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
 	mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
 	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
-		goto error;
+		goto error_cmd_free;
 
 	return;
 
-error:
+error_mbuf_free:
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
+error_cmd_free:
 	lpfc_sli4_mbox_cmd_free(phba, mbox);
 	rdp_context->cmpl(phba, rdp_context, FAILURE);
 }
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6dec7cf..c167911 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -112,9 +112,12 @@
 	if (ret)
 		return ret;
 
+	/* global ioc spinlock to protect the controller list during list operations */
 	printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
+	spin_lock(&gioc_lock);
 	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
 		ioc->fwfault_debug = mpt2sas_fwfault_debug;
+	spin_unlock(&gioc_lock);
 	return 0;
 }
 
@@ -4437,6 +4440,8 @@
 	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
 
+	/* synchronize resource freeing with the pci_access_mutex lock */
+	mutex_lock(&ioc->pci_access_mutex);
 	if (ioc->chip_phys && ioc->chip) {
 		_base_mask_interrupts(ioc);
 		ioc->shost_recovery = 1;
@@ -4456,6 +4461,7 @@
 		pci_disable_pcie_error_reporting(pdev);
 		pci_disable_device(pdev);
 	}
+	mutex_unlock(&ioc->pci_access_mutex);
 	return;
 }
 
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index caff8d1..97ea360 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -238,6 +238,7 @@
  * @flags: MPT_TARGET_FLAGS_XXX flags
  * @deleted: target flaged for deletion
  * @tm_busy: target is busy with TM request.
+ * @sdev: The sas_device associated with this target
  */
 struct MPT2SAS_TARGET {
 	struct scsi_target *starget;
@@ -248,6 +249,7 @@
 	u32	flags;
 	u8	deleted;
 	u8	tm_busy;
+	struct _sas_device *sdev;
 };
 
 
@@ -376,8 +378,24 @@
 	u8	phy;
 	u8	responding;
 	u8	pfa_led_on;
+	struct kref refcount;
 };
 
+static inline void sas_device_get(struct _sas_device *s)
+{
+	kref_get(&s->refcount);
+}
+
+static inline void sas_device_free(struct kref *r)
+{
+	kfree(container_of(r, struct _sas_device, refcount));
+}
+
+static inline void sas_device_put(struct _sas_device *s)
+{
+	kref_put(&s->refcount, sas_device_free);
+}
+
 /**
  * struct _raid_device - raid volume link list
  * @list: sas device list
@@ -799,6 +817,12 @@
  * @delayed_tr_list: target reset link list
  * @delayed_tr_volume_list: volume target reset link list
  * @@temp_sensors_count: flag to carry the number of temperature sensors
+ * @pci_access_mutex: Mutex to synchronize the ioctl and sysfs show paths
+ * with pci resource handling. Freeing the pci resources releases vital
+ * hardware/memory resources which might still be in use by the cli/sysfs
+ * path functions, resulting in a NULL pointer dereference followed by a
+ * kernel crash. To avoid this race condition we use mutex synchronization,
+ * which serializes the cli/sysfs_show paths against pci resource freeing.
  */
 struct MPT2SAS_ADAPTER {
 	struct list_head list;
@@ -1015,6 +1039,7 @@
 	u8		mfg_pg10_hide_flag;
 	u8		hide_drives;
 
+	struct mutex pci_access_mutex;
 };
 
 typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1023,6 +1048,17 @@
 
 /* base shared API */
 extern struct list_head mpt2sas_ioc_list;
+/* spinlock on list operations over IOCs
+ * Case: when multiple warpdrive cards (IOCs) are in use.
+ * Each IOC is added to the ioc list structure on initialization.
+ * Watchdog threads run at regular intervals to check each IOC for any
+ * fault condition which will trigger the dead_ioc thread to
+ * deallocate the pci resources, resulting in the IOC entry being deleted
+ * from the list. This deletion needs to be protected by a spinlock to
+ * ensure that ioc removal is synchronized; otherwise it might lead to
+ * list_del corruption as the ioc list is traversed in the cli path.
+ */
+extern spinlock_t gioc_lock;
 void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc);
 void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc);
 
@@ -1095,11 +1131,12 @@
     u16 handle);
 struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER
     *ioc, u64 sas_address);
-struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address(
+struct _sas_device *mpt2sas_get_sdev_by_addr(
+    struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
+struct _sas_device *__mpt2sas_get_sdev_by_addr(
     struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
 
 void mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc);
-
 void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
 
 /* config shared API */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 4e50960..3694b63 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -427,13 +427,16 @@
 _ctl_verify_adapter(int ioc_number, struct MPT2SAS_ADAPTER **iocpp)
 {
 	struct MPT2SAS_ADAPTER *ioc;
-
+	/* global ioc lock to protect the controller list during list operations */
+	spin_lock(&gioc_lock);
 	list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
 		if (ioc->id != ioc_number)
 			continue;
+		spin_unlock(&gioc_lock);
 		*iocpp = ioc;
 		return ioc_number;
 	}
+	spin_unlock(&gioc_lock);
 	*iocpp = NULL;
 	return -1;
 }
@@ -522,10 +525,15 @@
 
 	poll_wait(filep, &ctl_poll_wait, wait);
 
+	/* global ioc lock to protect the controller list during list operations */
+	spin_lock(&gioc_lock);
 	list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
-		if (ioc->aen_event_read_flag)
+		if (ioc->aen_event_read_flag) {
+			spin_unlock(&gioc_lock);
 			return POLLIN | POLLRDNORM;
+		}
 	}
+	spin_unlock(&gioc_lock);
 	return 0;
 }
 
@@ -2168,16 +2176,23 @@
 
 	if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc)
 		return -ENODEV;
+	/* pci_access_mutex lock acquired by ioctl path */
+	mutex_lock(&ioc->pci_access_mutex);
 	if (ioc->shost_recovery || ioc->pci_error_recovery ||
-	    ioc->is_driver_loading)
-		return -EAGAIN;
+		ioc->is_driver_loading || ioc->remove_host) {
+		ret = -EAGAIN;
+		goto out_unlock_pciaccess;
+	}
 
 	state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
 	if (state == NON_BLOCKING) {
-		if (!mutex_trylock(&ioc->ctl_cmds.mutex))
-			return -EAGAIN;
+		if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
+			ret = -EAGAIN;
+			goto out_unlock_pciaccess;
+		}
 	} else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
-		return -ERESTARTSYS;
+		ret = -ERESTARTSYS;
+		goto out_unlock_pciaccess;
 	}
 
 	switch (cmd) {
@@ -2258,6 +2273,8 @@
 	}
 
 	mutex_unlock(&ioc->ctl_cmds.mutex);
+out_unlock_pciaccess:
+	mutex_unlock(&ioc->pci_access_mutex);
 	return ret;
 }
 
@@ -2711,6 +2728,12 @@
 		    "warpdrive\n", ioc->name, __func__);
 		goto out;
 	}
+	/* pci_access_mutex lock acquired by sysfs show path */
+	mutex_lock(&ioc->pci_access_mutex);
+	if (ioc->pci_error_recovery || ioc->remove_host) {
+		mutex_unlock(&ioc->pci_access_mutex);
+		return 0;
+	}
 
 	/* allocate upto GPIOVal 36 entries */
 	sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
@@ -2749,6 +2772,7 @@
 
  out:
 	kfree(io_unit_pg3);
+	mutex_unlock(&ioc->pci_access_mutex);
 	return rc;
 }
 static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 3f26147..0ad09b2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -79,7 +79,8 @@
 
 /* global parameters */
 LIST_HEAD(mpt2sas_ioc_list);
-
+/* global ioc lock for list operations */
+DEFINE_SPINLOCK(gioc_lock);
 /* local parameters */
 static u8 scsi_io_cb_idx = -1;
 static u8 tm_cb_idx = -1;
@@ -176,9 +177,37 @@
 	u8			VP_ID;
 	u8			ignore;
 	u16			event;
+	struct kref		refcount;
 	char			event_data[0] __aligned(4);
 };
 
+static void fw_event_work_free(struct kref *r)
+{
+	kfree(container_of(r, struct fw_event_work, refcount));
+}
+
+static void fw_event_work_get(struct fw_event_work *fw_work)
+{
+	kref_get(&fw_work->refcount);
+}
+
+static void fw_event_work_put(struct fw_event_work *fw_work)
+{
+	kref_put(&fw_work->refcount, fw_event_work_free);
+}
+
+static struct fw_event_work *alloc_fw_event_work(int len)
+{
+	struct fw_event_work *fw_event;
+
+	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
+	if (!fw_event)
+		return NULL;
+
+	kref_init(&fw_event->refcount);
+	return fw_event;
+}
+
 /* raid transport support */
 static struct raid_template *mpt2sas_raid_template;
 
@@ -293,8 +322,10 @@
 		return ret;
 
 	printk(KERN_INFO "setting logging_level(0x%08x)\n", logging_level);
+	spin_lock(&gioc_lock);
 	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
 		ioc->logging_level = logging_level;
+	spin_unlock(&gioc_lock);
 	return 0;
 }
 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
@@ -526,8 +557,61 @@
 	}
 }
 
+static struct _sas_device *
+__mpt2sas_get_sdev_from_target(struct MPT2SAS_ADAPTER *ioc,
+		struct MPT2SAS_TARGET *tgt_priv)
+{
+	struct _sas_device *ret;
+
+	assert_spin_locked(&ioc->sas_device_lock);
+
+	ret = tgt_priv->sdev;
+	if (ret)
+		sas_device_get(ret);
+
+	return ret;
+}
+
+static struct _sas_device *
+mpt2sas_get_sdev_from_target(struct MPT2SAS_ADAPTER *ioc,
+		struct MPT2SAS_TARGET *tgt_priv)
+{
+	struct _sas_device *ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	ret = __mpt2sas_get_sdev_from_target(ioc, tgt_priv);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+	return ret;
+}
+
+
+struct _sas_device *
+__mpt2sas_get_sdev_by_addr(struct MPT2SAS_ADAPTER *ioc,
+    u64 sas_address)
+{
+	struct _sas_device *sas_device;
+
+	assert_spin_locked(&ioc->sas_device_lock);
+
+	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+		if (sas_device->sas_address == sas_address)
+			goto found_device;
+
+	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+		if (sas_device->sas_address == sas_address)
+			goto found_device;
+
+	return NULL;
+
+found_device:
+	sas_device_get(sas_device);
+	return sas_device;
+}
+
 /**
- * mpt2sas_scsih_sas_device_find_by_sas_address - sas device search
+ * mpt2sas_get_sdev_by_addr - sas device search
  * @ioc: per adapter object
  * @sas_address: sas address
  * Context: Calling function should acquire ioc->sas_device_lock
@@ -536,24 +620,44 @@
  * object.
  */
 struct _sas_device *
-mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
+mpt2sas_get_sdev_by_addr(struct MPT2SAS_ADAPTER *ioc,
     u64 sas_address)
 {
 	struct _sas_device *sas_device;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = __mpt2sas_get_sdev_by_addr(ioc,
+			sas_address);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+	return sas_device;
+}
+
+static struct _sas_device *
+__mpt2sas_get_sdev_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+	struct _sas_device *sas_device;
+
+	assert_spin_locked(&ioc->sas_device_lock);
 
 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
-		if (sas_device->sas_address == sas_address)
-			return sas_device;
+		if (sas_device->handle == handle)
+			goto found_device;
 
 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
-		if (sas_device->sas_address == sas_address)
-			return sas_device;
+		if (sas_device->handle == handle)
+			goto found_device;
 
 	return NULL;
+
+found_device:
+	sas_device_get(sas_device);
+	return sas_device;
 }
 
 /**
- * _scsih_sas_device_find_by_handle - sas device search
+ * mpt2sas_get_sdev_by_handle - sas device search
  * @ioc: per adapter object
  * @handle: sas device handle (assigned by firmware)
  * Context: Calling function should acquire ioc->sas_device_lock
@@ -562,19 +666,16 @@
  * object.
  */
 static struct _sas_device *
-_scsih_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+mpt2sas_get_sdev_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 {
 	struct _sas_device *sas_device;
+	unsigned long flags;
 
-	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
-		if (sas_device->handle == handle)
-			return sas_device;
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
-	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
-		if (sas_device->handle == handle)
-			return sas_device;
-
-	return NULL;
+	return sas_device;
 }
 
 /**
@@ -583,7 +684,7 @@
  * @sas_device: the sas_device object
  * Context: This function will acquire ioc->sas_device_lock.
  *
- * Removing object and freeing associated memory from the ioc->sas_device_list.
+ * If sas_device is on the list, remove it and decrement its reference count.
  */
 static void
 _scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
@@ -594,9 +695,15 @@
 	if (!sas_device)
 		return;
 
+	/*
+	 * The lock serializes access to the list, but we still need to verify
+	 * that nobody removed the entry while we were waiting on the lock.
+	 */
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	list_del(&sas_device->list);
-	kfree(sas_device);
+	if (!list_empty(&sas_device->list)) {
+		list_del_init(&sas_device->list);
+		sas_device_put(sas_device);
+	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 }
 
@@ -620,6 +727,7 @@
 	    sas_device->handle, (unsigned long long)sas_device->sas_address));
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device_get(sas_device);
 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
@@ -659,6 +767,7 @@
 	    sas_device->handle, (unsigned long long)sas_device->sas_address));
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device_get(sas_device);
 	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
 	_scsih_determine_boot_device(ioc, sas_device, 0);
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -1208,12 +1317,15 @@
 		goto not_sata;
 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
 		goto not_sata;
+
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
-	   sas_device_priv_data->sas_target->sas_address);
-	if (sas_device && sas_device->device_info &
-	    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
-		max_depth = MPT2SAS_SATA_QUEUE_DEPTH;
+	sas_device = __mpt2sas_get_sdev_from_target(ioc, sas_target_priv_data);
+	if (sas_device) {
+		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+			max_depth = MPT2SAS_SATA_QUEUE_DEPTH;
+
+		sas_device_put(sas_device);
+	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
  not_sata:
@@ -1271,18 +1383,20 @@
 	/* sas/sata devices */
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	rphy = dev_to_rphy(starget->dev.parent);
-	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+	sas_device = __mpt2sas_get_sdev_by_addr(ioc,
 	   rphy->identify.sas_address);
 
 	if (sas_device) {
 		sas_target_priv_data->handle = sas_device->handle;
 		sas_target_priv_data->sas_address = sas_device->sas_address;
+		sas_target_priv_data->sdev = sas_device;
 		sas_device->starget = starget;
 		sas_device->id = starget->id;
 		sas_device->channel = starget->channel;
 		if (test_bit(sas_device->handle, ioc->pd_handles))
 			sas_target_priv_data->flags |=
 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
+
 	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
@@ -1324,13 +1438,21 @@
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	rphy = dev_to_rphy(starget->dev.parent);
-	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
-	   rphy->identify.sas_address);
+	sas_device = __mpt2sas_get_sdev_from_target(ioc, sas_target_priv_data);
 	if (sas_device && (sas_device->starget == starget) &&
 	    (sas_device->id == starget->id) &&
 	    (sas_device->channel == starget->channel))
 		sas_device->starget = NULL;
 
+	if (sas_device) {
+		/*
+		 * Corresponding get() is in _scsih_target_alloc()
+		 */
+		sas_target_priv_data->sdev = NULL;
+		sas_device_put(sas_device);
+
+		sas_device_put(sas_device);
+	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
  out:
@@ -1386,7 +1508,7 @@
 
 	if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+		sas_device = __mpt2sas_get_sdev_by_addr(ioc,
 				sas_target_priv_data->sas_address);
 		if (sas_device && (sas_device->starget == NULL)) {
 			sdev_printk(KERN_INFO, sdev,
@@ -1394,6 +1516,10 @@
 			     __func__, __LINE__);
 			sas_device->starget = starget;
 		}
+
+		if (sas_device)
+			sas_device_put(sas_device);
+
 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	}
 
@@ -1428,10 +1554,13 @@
 
 	if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
-		   sas_target_priv_data->sas_address);
+		sas_device = __mpt2sas_get_sdev_from_target(ioc,
+				sas_target_priv_data);
 		if (sas_device && !sas_target_priv_data->num_luns)
 			sas_device->starget = NULL;
+
+		if (sas_device)
+			sas_device_put(sas_device);
 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	}
 
@@ -2078,7 +2207,7 @@
 	}
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+	sas_device = __mpt2sas_get_sdev_by_addr(ioc,
 	   sas_device_priv_data->sas_target->sas_address);
 	if (!sas_device) {
 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -2112,17 +2241,18 @@
 	    (unsigned long long) sas_device->enclosure_logical_id,
 	    sas_device->slot);
 
+	sas_device_put(sas_device);
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	if (!ssp_target)
 		_scsih_display_sata_capabilities(ioc, handle, sdev);
 
-
 	_scsih_change_queue_depth(sdev, qdepth);
 
 	if (ssp_target) {
 		sas_read_port_mode_page(sdev);
 		_scsih_enable_tlr(ioc, sdev);
 	}
+
 	return 0;
 }
 
@@ -2509,8 +2639,7 @@
 		    device_str, (unsigned long long)priv_target->sas_address);
 	} else {
 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
-		    priv_target->sas_address);
+		sas_device = __mpt2sas_get_sdev_from_target(ioc, priv_target);
 		if (sas_device) {
 			if (priv_target->flags &
 			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
@@ -2529,6 +2658,8 @@
 			    "enclosure_logical_id(0x%016llx), slot(%d)\n",
 			   (unsigned long long)sas_device->enclosure_logical_id,
 			    sas_device->slot);
+
+			sas_device_put(sas_device);
 		}
 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	}
@@ -2604,12 +2735,12 @@
 {
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
-	struct _sas_device *sas_device;
-	unsigned long flags;
+	struct _sas_device *sas_device = NULL;
 	u16	handle;
 	int r;
 
 	struct scsi_target *starget = scmd->device->sdev_target;
+	struct MPT2SAS_TARGET *target_priv_data = starget->hostdata;
 
 	starget_printk(KERN_INFO, starget, "attempting device reset! "
 	    "scmd(%p)\n", scmd);
@@ -2629,12 +2760,10 @@
 	handle = 0;
 	if (sas_device_priv_data->sas_target->flags &
 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
-		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		sas_device = _scsih_sas_device_find_by_handle(ioc,
-		   sas_device_priv_data->sas_target->handle);
+		sas_device = mpt2sas_get_sdev_from_target(ioc,
+				target_priv_data);
 		if (sas_device)
 			handle = sas_device->volume_handle;
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	} else
 		handle = sas_device_priv_data->sas_target->handle;
 
@@ -2651,6 +2780,10 @@
  out:
 	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+	if (sas_device)
+		sas_device_put(sas_device);
+
 	return r;
 }
 
@@ -2665,11 +2798,11 @@
 {
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
-	struct _sas_device *sas_device;
-	unsigned long flags;
+	struct _sas_device *sas_device = NULL;
 	u16	handle;
 	int r;
 	struct scsi_target *starget = scmd->device->sdev_target;
+	struct MPT2SAS_TARGET *target_priv_data = starget->hostdata;
 
 	starget_printk(KERN_INFO, starget, "attempting target reset! "
 	    "scmd(%p)\n", scmd);
@@ -2689,12 +2822,10 @@
 	handle = 0;
 	if (sas_device_priv_data->sas_target->flags &
 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
-		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		sas_device = _scsih_sas_device_find_by_handle(ioc,
-		   sas_device_priv_data->sas_target->handle);
+		sas_device = mpt2sas_get_sdev_from_target(ioc,
+				target_priv_data);
 		if (sas_device)
 			handle = sas_device->volume_handle;
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	} else
 		handle = sas_device_priv_data->sas_target->handle;
 
@@ -2711,6 +2842,10 @@
  out:
 	starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+	if (sas_device)
+		sas_device_put(sas_device);
+
 	return r;
 }
 
@@ -2768,36 +2903,39 @@
 		return;
 
 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
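+	/*
+	 * Take one reference for the event's place on fw_event_list and a
+	 * second for the queued delayed work; each owner drops its reference
+	 * with fw_event_work_put() when it is done with the event.
+	 */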
+	fw_event_work_get(fw_event);
 	list_add_tail(&fw_event->list, &ioc->fw_event_list);
 	INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work);
+	fw_event_work_get(fw_event);
 	queue_delayed_work(ioc->firmware_event_thread,
 	    &fw_event->delayed_work, 0);
 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 }
 
 /**
- * _scsih_fw_event_free - delete fw_event
+ * _scsih_fw_event_del_from_list - delete fw_event from the list
  * @ioc: per adapter object
  * @fw_event: object describing the event
  * Context: This function will acquire ioc->fw_event_lock.
  *
- * This removes firmware event object from link list, frees associated memory.
+ * If the fw_event is on the fw_event_list, remove it and do a put.
  *
  * Return nothing.
  */
 static void
-_scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
+_scsih_fw_event_del_from_list(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
     *fw_event)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
-	list_del(&fw_event->list);
-	kfree(fw_event);
+	if (!list_empty(&fw_event->list)) {
+		list_del_init(&fw_event->list);
+		fw_event_work_put(fw_event);
+	}
 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 }
 
-
 /**
  * _scsih_error_recovery_delete_devices - remove devices not responding
  * @ioc: per adapter object
@@ -2812,13 +2950,14 @@
 	if (ioc->is_driver_loading)
 		return;
 
-	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+	fw_event = alloc_fw_event_work(0);
 	if (!fw_event)
 		return;
 
 	fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES;
 	fw_event->ioc = ioc;
 	_scsih_fw_event_add(ioc, fw_event);
+	fw_event_work_put(fw_event);
 }
 
 /**
@@ -2832,12 +2971,29 @@
 {
 	struct fw_event_work *fw_event;
 
-	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+	fw_event = alloc_fw_event_work(0);
 	if (!fw_event)
 		return;
 	fw_event->event = MPT2SAS_PORT_ENABLE_COMPLETE;
 	fw_event->ioc = ioc;
 	_scsih_fw_event_add(ioc, fw_event);
+	fw_event_work_put(fw_event);
+}
+
+static struct fw_event_work *dequeue_next_fw_event(struct MPT2SAS_ADAPTER *ioc)
+{
+	unsigned long flags;
+	struct fw_event_work *fw_event = NULL;
+
+	spin_lock_irqsave(&ioc->fw_event_lock, flags);
+	if (!list_empty(&ioc->fw_event_list)) {
+		fw_event = list_first_entry(&ioc->fw_event_list,
+				struct fw_event_work, list);
+		list_del_init(&fw_event->list);
+	}
+	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+
+	return fw_event;
 }
 
 /**
@@ -2852,17 +3008,25 @@
 static void
 _scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
 {
-	struct fw_event_work *fw_event, *next;
+	struct fw_event_work *fw_event;
 
 	if (list_empty(&ioc->fw_event_list) ||
 	     !ioc->firmware_event_thread || in_interrupt())
 		return;
 
-	list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
-		if (cancel_delayed_work_sync(&fw_event->delayed_work)) {
-			_scsih_fw_event_free(ioc, fw_event);
-			continue;
-		}
+	while ((fw_event = dequeue_next_fw_event(ioc))) {
+		/*
+		 * Wait on the fw_event to complete. If this returns 1, then
+		 * the event was never executed, and we need a put for the
+		 * reference the delayed_work had on the fw_event.
+		 *
+		 * If it did execute, we wait for it to finish, and the put will
+		 * happen from _firmware_event_work()
+		 */
+		if (cancel_delayed_work_sync(&fw_event->delayed_work))
+			fw_event_work_put(fw_event);
+
+		fw_event_work_put(fw_event);
 	}
 }
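The queue handling above leans on a small set of reference-counting helpers for fw_event_work that this patch introduces earlier in the file: alloc_fw_event_work(), fw_event_work_get() and fw_event_work_put(). A minimal sketch of how such helpers can be built on struct kref; the field layout and the bodies below are an illustration under that assumption, not the driver's exact definitions:

    /* Sketch only; assumes <linux/kref.h>, <linux/slab.h>, <linux/workqueue.h>. */
    struct fw_event_work {
    	struct list_head	list;
    	struct kref		refcount;	/* one reference per owner */
    	struct MPT2SAS_ADAPTER	*ioc;
    	struct delayed_work	delayed_work;
    	u16			event;
    	u16			device_handle;
    	u8			VF_ID;
    	u8			VP_ID;
    	char			event_data[0];	/* trailing event payload */
    };

    static void fw_event_work_free(struct kref *r)
    {
    	kfree(container_of(r, struct fw_event_work, refcount));
    }

    static void fw_event_work_get(struct fw_event_work *fw_work)
    {
    	kref_get(&fw_work->refcount);
    }

    static void fw_event_work_put(struct fw_event_work *fw_work)
    {
    	kref_put(&fw_work->refcount, fw_event_work_free);
    }

    static struct fw_event_work *alloc_fw_event_work(int len)
    {
    	struct fw_event_work *fw_event;

    	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
    	if (!fw_event)
    		return NULL;

    	kref_init(&fw_event->refcount);	/* caller owns this initial reference */
    	return fw_event;
    }

With this in place, _scsih_fw_event_add() takes one reference for the list and one for the queued work, after which the allocation sites below drop their own reference with fw_event_work_put().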
 
@@ -3002,15 +3166,15 @@
 
 	list_for_each_entry(mpt2sas_port,
 	   &sas_expander->sas_port_list, port_list) {
-		if (mpt2sas_port->remote_identify.device_type ==
-		    SAS_END_DEVICE) {
+		if (mpt2sas_port->remote_identify.device_type == SAS_END_DEVICE) {
 			spin_lock_irqsave(&ioc->sas_device_lock, flags);
-			sas_device =
-			    mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
-			   mpt2sas_port->remote_identify.sas_address);
-			if (sas_device)
+			sas_device = __mpt2sas_get_sdev_by_addr(ioc,
+					mpt2sas_port->remote_identify.sas_address);
+			if (sas_device) {
 				set_bit(sas_device->handle,
-				    ioc->blocking_handles);
+						ioc->blocking_handles);
+				sas_device_put(sas_device);
+			}
 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 		}
 	}
@@ -3080,7 +3244,7 @@
 {
 	Mpi2SCSITaskManagementRequest_t *mpi_request;
 	u16 smid;
-	struct _sas_device *sas_device;
+	struct _sas_device *sas_device = NULL;
 	struct MPT2SAS_TARGET *sas_target_priv_data = NULL;
 	u64 sas_address = 0;
 	unsigned long flags;
@@ -3110,7 +3274,7 @@
 		return;
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+	sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
 	if (sas_device && sas_device->starget &&
 	     sas_device->starget->hostdata) {
 		sas_target_priv_data = sas_device->starget->hostdata;
@@ -3131,14 +3295,14 @@
 	if (!smid) {
 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
 		if (!delayed_tr)
-			return;
+			goto out;
 		INIT_LIST_HEAD(&delayed_tr->list);
 		delayed_tr->handle = handle;
 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
 		    "DELAYED:tr:handle(0x%04x), (open)\n",
 		    ioc->name, handle));
-		return;
+		goto out;
 	}
 
 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), "
@@ -3150,6 +3314,9 @@
 	mpi_request->DevHandle = cpu_to_le16(handle);
 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
 	mpt2sas_base_put_smid_hi_priority(ioc, smid);
+out:
+	if (sas_device)
+		sas_device_put(sas_device);
 }
 
 
@@ -4068,7 +4235,6 @@
 	char *desc_scsi_state = ioc->tmp_string;
 	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
 	struct _sas_device *sas_device = NULL;
-	unsigned long flags;
 	struct scsi_target *starget = scmd->device->sdev_target;
 	struct MPT2SAS_TARGET *priv_target = starget->hostdata;
 	char *device_str = NULL;
@@ -4200,9 +4366,7 @@
 		printk(MPT2SAS_WARN_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
 		    device_str, (unsigned long long)priv_target->sas_address);
 	} else {
-		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
-		    priv_target->sas_address);
+		sas_device = mpt2sas_get_sdev_from_target(ioc, priv_target);
 		if (sas_device) {
 			printk(MPT2SAS_WARN_FMT "\tsas_address(0x%016llx), "
 			    "phy(%d)\n", ioc->name, sas_device->sas_address,
@@ -4211,8 +4375,9 @@
 			    "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
 			    ioc->name, sas_device->enclosure_logical_id,
 			    sas_device->slot);
+
+			sas_device_put(sas_device);
 		}
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	}
 
 	printk(MPT2SAS_WARN_FMT "\thandle(0x%04x), ioc_status(%s)(0x%04x), "
@@ -4259,7 +4424,7 @@
 	Mpi2SepRequest_t mpi_request;
 	struct _sas_device *sas_device;
 
-	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+	sas_device = mpt2sas_get_sdev_by_handle(ioc, handle);
 	if (!sas_device)
 		return;
 
@@ -4274,7 +4439,7 @@
 	    &mpi_request)) != 0) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
 		__FILE__, __LINE__, __func__);
-		return;
+		goto out;
 	}
 	sas_device->pfa_led_on = 1;
 
@@ -4284,8 +4449,10 @@
 		 "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
 		 ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
 		 le32_to_cpu(mpi_reply.IOCLogInfo)));
-		return;
+		goto out;
 	}
+out:
+	sas_device_put(sas_device);
 }
 
 /**
@@ -4340,13 +4507,14 @@
 {
 	struct fw_event_work *fw_event;
 
-	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+	fw_event = alloc_fw_event_work(0);
 	if (!fw_event)
 		return;
 	fw_event->event = MPT2SAS_TURN_ON_PFA_LED;
 	fw_event->device_handle = handle;
 	fw_event->ioc = ioc;
 	_scsih_fw_event_add(ioc, fw_event);
+	fw_event_work_put(fw_event);
 }
 
 /**
@@ -4370,19 +4538,17 @@
 
 	/* only handle non-raid devices */
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+	sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
 	if (!sas_device) {
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		return;
+		goto out_unlock;
 	}
 	starget = sas_device->starget;
 	sas_target_priv_data = starget->hostdata;
 
 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
-	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) {
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		return;
-	}
+	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
+		goto out_unlock;
+
 	starget_printk(KERN_WARNING, starget, "predicted fault\n");
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
@@ -4396,7 +4562,7 @@
 	if (!event_reply) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
-		return;
+		goto out;
 	}
 
 	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
@@ -4413,6 +4579,14 @@
 	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
 	mpt2sas_ctl_add_to_event_log(ioc, event_reply);
 	kfree(event_reply);
+out:
+	if (sas_device)
+		sas_device_put(sas_device);
+	return;
+
+out_unlock:
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	goto out;
 }
 
 /**
@@ -5148,14 +5322,13 @@
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
-	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+	sas_device = __mpt2sas_get_sdev_by_addr(ioc,
 	    sas_address);
 
 	if (!sas_device) {
 		printk(MPT2SAS_ERR_FMT "device is not present "
 		    "handle(0x%04x), no sas_device!!!\n", ioc->name, handle);
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		return;
+		goto out_unlock;
 	}
 
 	if (unlikely(sas_device->handle != handle)) {
@@ -5172,19 +5345,24 @@
 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
 		printk(MPT2SAS_ERR_FMT "device is not present "
 		    "handle(0x%04x), flags!!!\n", ioc->name, handle);
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		return;
+		goto out_unlock;
 	}
 
 	/* check if there were any issues with discovery */
 	if (_scsih_check_access_status(ioc, sas_address, handle,
-	    sas_device_pg0.AccessStatus)) {
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		return;
-	}
+	    sas_device_pg0.AccessStatus))
+		goto out_unlock;
+
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	_scsih_ublock_io_device(ioc, sas_address);
+	if (sas_device)
+		sas_device_put(sas_device);
+	return;
 
+out_unlock:
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	if (sas_device)
+		sas_device_put(sas_device);
 }
 
 /**
@@ -5208,7 +5386,6 @@
 	u32 ioc_status;
 	__le64 sas_address;
 	u32 device_info;
-	unsigned long flags;
 
 	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -5250,14 +5427,13 @@
 		return -1;
 	}
 
-
-	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+	sas_device = mpt2sas_get_sdev_by_addr(ioc,
 	    sas_address);
-	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
-	if (sas_device)
+	if (sas_device) {
+		sas_device_put(sas_device);
 		return 0;
+	}
 
 	sas_device = kzalloc(sizeof(struct _sas_device),
 	    GFP_KERNEL);
@@ -5267,6 +5443,7 @@
 		return -1;
 	}
 
+	kref_init(&sas_device->refcount);
 	sas_device->handle = handle;
 	if (_scsih_get_sas_address(ioc, le16_to_cpu
 		(sas_device_pg0.ParentDevHandle),
@@ -5296,6 +5473,7 @@
 	else
 		_scsih_sas_device_add(ioc, sas_device);
 
+	sas_device_put(sas_device);
 	return 0;
 }
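The kref_init() added above is the device-side half of the same scheme: every _sas_device now carries a reference count, and the lookup helpers used throughout this patch (__mpt2sas_get_sdev_by_addr(), __mpt2sas_get_sdev_by_handle(), __mpt2sas_get_sdev_from_target() and their locking wrappers without the leading underscores) return the object with an extra reference that the caller releases via sas_device_put(). A rough sketch of the pattern, assuming the struct kref member named refcount initialised above; the real helper bodies appear earlier in the patch and may differ in detail:

    static void sas_device_free(struct kref *r)
    {
    	kfree(container_of(r, struct _sas_device, refcount));
    }

    static void sas_device_get(struct _sas_device *s)
    {
    	kref_get(&s->refcount);
    }

    static void sas_device_put(struct _sas_device *s)
    {
    	kref_put(&s->refcount, sas_device_free);
    }

    /* Caller holds ioc->sas_device_lock; the returned object is referenced. */
    static struct _sas_device *
    __mpt2sas_get_sdev_by_addr(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
    {
    	struct _sas_device *sas_device;

    	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
    		if (sas_device->sas_address == sas_address)
    			goto found;
    	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
    		if (sas_device->sas_address == sas_address)
    			goto found;
    	return NULL;
    found:
    	sas_device_get(sas_device);
    	return sas_device;
    }

The unlocked variants presumably just wrap the same lookup in spin_lock_irqsave(&ioc->sas_device_lock, ...) before returning.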
 
@@ -5344,7 +5522,6 @@
 	    "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
 	    sas_device->handle, (unsigned long long)
 	    sas_device->sas_address));
-	kfree(sas_device);
 }
 /**
  * _scsih_device_remove_by_handle - removing device object by handle
@@ -5363,12 +5540,17 @@
 		return;
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
-	if (sas_device)
-		list_del(&sas_device->list);
+	sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
+	if (sas_device) {
+		list_del_init(&sas_device->list);
+		sas_device_put(sas_device);
+	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-	if (sas_device)
+
+	if (sas_device) {
 		_scsih_remove_device(ioc, sas_device);
+		sas_device_put(sas_device);
+	}
 }
 
 /**
@@ -5389,13 +5571,17 @@
 		return;
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
-	    sas_address);
-	if (sas_device)
-		list_del(&sas_device->list);
+	sas_device = __mpt2sas_get_sdev_by_addr(ioc, sas_address);
+	if (sas_device) {
+		list_del_init(&sas_device->list);
+		sas_device_put(sas_device);
+	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-	if (sas_device)
+
+	if (sas_device) {
 		_scsih_remove_device(ioc, sas_device);
+		sas_device_put(sas_device);
+	}
 }
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 /**
@@ -5716,26 +5902,28 @@
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_address = le64_to_cpu(event_data->SASAddress);
-	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+	sas_device = __mpt2sas_get_sdev_by_addr(ioc,
 	    sas_address);
 
-	if (!sas_device || !sas_device->starget) {
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		return;
-	}
+	if (!sas_device || !sas_device->starget)
+		goto out;
 
 	target_priv_data = sas_device->starget->hostdata;
-	if (!target_priv_data) {
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		return;
-	}
+	if (!target_priv_data)
+		goto out;
 
 	if (event_data->ReasonCode ==
 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
 		target_priv_data->tm_busy = 1;
 	else
 		target_priv_data->tm_busy = 0;
+
+out:
+	if (sas_device)
+		sas_device_put(sas_device);
+
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
 }
 
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -6123,7 +6311,7 @@
 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+	sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
 	if (sas_device) {
 		sas_device->volume_handle = 0;
 		sas_device->volume_wwid = 0;
@@ -6142,6 +6330,8 @@
 	/* exposing raid component */
 	if (starget)
 		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
+
+	sas_device_put(sas_device);
 }
 
 /**
@@ -6170,7 +6360,7 @@
 		    &volume_wwid);
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+	sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
 	if (sas_device) {
 		set_bit(handle, ioc->pd_handles);
 		if (sas_device->starget && sas_device->starget->hostdata) {
@@ -6189,6 +6379,8 @@
 	/* hiding raid component */
 	if (starget)
 		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
+
+	sas_device_put(sas_device);
 }
 
 /**
@@ -6221,7 +6413,6 @@
     Mpi2EventIrConfigElement_t *element)
 {
 	struct _sas_device *sas_device;
-	unsigned long flags;
 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
 	Mpi2ConfigReply_t mpi_reply;
 	Mpi2SasDevicePage0_t sas_device_pg0;
@@ -6231,11 +6422,11 @@
 
 	set_bit(handle, ioc->pd_handles);
 
-	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
-	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-	if (sas_device)
+	sas_device = mpt2sas_get_sdev_by_handle(ioc, handle);
+	if (sas_device) {
+		sas_device_put(sas_device);
 		return;
+	}
 
 	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -6509,7 +6700,6 @@
 	u16 handle, parent_handle;
 	u32 state;
 	struct _sas_device *sas_device;
-	unsigned long flags;
 	Mpi2ConfigReply_t mpi_reply;
 	Mpi2SasDevicePage0_t sas_device_pg0;
 	u32 ioc_status;
@@ -6542,12 +6732,11 @@
 		if (!ioc->is_warpdrive)
 			set_bit(handle, ioc->pd_handles);
 
-		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
-		if (sas_device)
+		sas_device = mpt2sas_get_sdev_by_handle(ioc, handle);
+		if (sas_device) {
+			sas_device_put(sas_device);
 			return;
+		}
 
 		if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
@@ -7015,6 +7204,7 @@
 	struct _raid_device *raid_device, *raid_device_next;
 	struct list_head tmp_list;
 	unsigned long flags;
+	LIST_HEAD(head);
 
 	printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n",
 	    ioc->name);
@@ -7022,14 +7212,29 @@
 	/* removing unresponding end devices */
 	printk(MPT2SAS_INFO_FMT "removing unresponding devices: end-devices\n",
 	    ioc->name);
+
+	/*
+	 * Iterate, pulling off devices marked as non-responding. We become the
+	 * owner for the reference the list had on any object we prune.
+	 */
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	list_for_each_entry_safe(sas_device, sas_device_next,
-	    &ioc->sas_device_list, list) {
+			&ioc->sas_device_list, list) {
 		if (!sas_device->responding)
-			mpt2sas_device_remove_by_sas_address(ioc,
-				sas_device->sas_address);
+			list_move_tail(&sas_device->list, &head);
 		else
 			sas_device->responding = 0;
 	}
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+	/*
+	 * Now, uninitialize and remove the unresponding devices we pruned.
+	 */
+	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
+		_scsih_remove_device(ioc, sas_device);
+		list_del_init(&sas_device->list);
+		sas_device_put(sas_device);
+	}
 
 	/* removing unresponding volumes */
 	if (ioc->ir_firmware) {
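The rewrite above avoids walking ioc->sas_device_list while repeatedly dropping sas_device_lock: non-responding entries are detached onto a private list in a single locked pass (the reference the list held moves with them), and the teardown, which may sleep, then runs with the lock released. The same pattern in isolation, with placeholder names; struct item, teardown_item() and item_put() are made up for the example:

    /* Placeholder type and helpers, for illustration only. */
    struct item {
    	struct list_head	list;
    	u8			responding;
    };
    void teardown_item(struct item *it);	/* may sleep */
    void item_put(struct item *it);		/* drops one reference */

    static void prune_stale_items(spinlock_t *lock, struct list_head *live)
    {
    	struct item *it, *next;
    	unsigned long flags;
    	LIST_HEAD(stale);

    	spin_lock_irqsave(lock, flags);
    	list_for_each_entry_safe(it, next, live, list) {
    		if (!it->responding)
    			list_move_tail(&it->list, &stale); /* inherit the list's ref */
    		else
    			it->responding = 0;
    	}
    	spin_unlock_irqrestore(lock, flags);

    	list_for_each_entry_safe(it, next, &stale, list) {
    		teardown_item(it);	/* lock not held, sleeping is fine */
    		list_del_init(&it->list);
    		item_put(it);		/* reference the list used to hold */
    	}
    }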
@@ -7179,11 +7384,11 @@
 		}
 		phys_disk_num = pd_pg0.PhysDiskNum;
 		handle = le16_to_cpu(pd_pg0.DevHandle);
-		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		if (sas_device)
+		sas_device = mpt2sas_get_sdev_by_handle(ioc, handle);
+		if (sas_device) {
+			sas_device_put(sas_device);
 			continue;
+		}
 		if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
 		    handle) != 0)
@@ -7302,12 +7507,12 @@
 		if (!(_scsih_is_end_device(
 		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
 			continue;
-		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+		sas_device = mpt2sas_get_sdev_by_addr(ioc,
 		    le64_to_cpu(sas_device_pg0.SASAddress));
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		if (sas_device)
+		if (sas_device) {
+			sas_device_put(sas_device);
 			continue;
+		}
 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
 			printk(MPT2SAS_INFO_FMT "\tBEFORE adding end device: "
@@ -7410,17 +7615,27 @@
 	    struct fw_event_work, delayed_work.work);
 	struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
 
+	_scsih_fw_event_del_from_list(ioc, fw_event);
+
 	/* the queue is being flushed so ignore this event */
-	if (ioc->remove_host ||
-	    ioc->pci_error_recovery) {
-		_scsih_fw_event_free(ioc, fw_event);
+	if (ioc->remove_host || ioc->pci_error_recovery) {
+		fw_event_work_put(fw_event);
 		return;
 	}
 
 	switch (fw_event->event) {
 	case MPT2SAS_REMOVE_UNRESPONDING_DEVICES:
-		while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery)
+		while (scsi_host_in_recovery(ioc->shost) ||
+				ioc->shost_recovery) {
+			/*
+			 * If we're unloading, bail. Otherwise, this can become
+			 * an infinite loop.
+			 */
+			if (ioc->remove_host)
+				goto out;
+
 			ssleep(1);
+		}
 		_scsih_remove_unresponding_sas_devices(ioc);
 		_scsih_scan_for_devices_after_reset(ioc);
 		break;
@@ -7469,7 +7684,8 @@
 		_scsih_sas_ir_operation_status_event(ioc, fw_event);
 		break;
 	}
-	_scsih_fw_event_free(ioc, fw_event);
+out:
+	fw_event_work_put(fw_event);
 }
 
 /**
@@ -7607,7 +7823,7 @@
 	}
 
 	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
-	fw_event = kzalloc(sizeof(*fw_event) + sz, GFP_ATOMIC);
+	fw_event = alloc_fw_event_work(sz);
 	if (!fw_event) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
@@ -7620,6 +7836,7 @@
 	fw_event->VP_ID = mpi_reply->VP_ID;
 	fw_event->event = event;
 	_scsih_fw_event_add(ioc, fw_event);
+	fw_event_work_put(fw_event);
 	return;
 }
 
@@ -7867,7 +8084,9 @@
 	sas_remove_host(shost);
 	scsi_remove_host(shost);
 	mpt2sas_base_detach(ioc);
+	spin_lock(&gioc_lock);
 	list_del(&ioc->list);
+	spin_unlock(&gioc_lock);
 	scsi_host_put(shost);
 }
 
@@ -7966,6 +8185,48 @@
 	}
 }
 
+static struct _sas_device *get_next_sas_device(struct MPT2SAS_ADAPTER *ioc)
+{
+	struct _sas_device *sas_device = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	if (!list_empty(&ioc->sas_device_init_list)) {
+		sas_device = list_first_entry(&ioc->sas_device_init_list,
+				struct _sas_device, list);
+		sas_device_get(sas_device);
+	}
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+	return sas_device;
+}
+
+static void sas_device_make_active(struct MPT2SAS_ADAPTER *ioc,
+		struct _sas_device *sas_device)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+
+	/*
+	 * Since we dropped the lock during the call to port_add(), we need to
+	 * be careful here that somebody else didn't move or delete this item
+	 * while we were busy with other things.
+	 *
+	 * If it was on the list, we need a put() for the reference the list
+	 * had. Either way, we need a get() for the destination list.
+	 */
+	if (!list_empty(&sas_device->list)) {
+		list_del_init(&sas_device->list);
+		sas_device_put(sas_device);
+	}
+
+	sas_device_get(sas_device);
+	list_add_tail(&sas_device->list, &ioc->sas_device_list);
+
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
 /**
  * _scsih_probe_sas - reporting sas devices to sas transport
  * @ioc: per adapter object
@@ -7975,34 +8236,30 @@
 static void
 _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
 {
-	struct _sas_device *sas_device, *next;
-	unsigned long flags;
+	struct _sas_device *sas_device;
 
-	/* SAS Device List */
-	list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
-	    list) {
+	if (ioc->hide_drives)
+		return;
 
-		if (ioc->hide_drives)
-			continue;
-
+	while ((sas_device = get_next_sas_device(ioc))) {
 		if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
-		    sas_device->sas_address_parent)) {
-			list_del(&sas_device->list);
-			kfree(sas_device);
+				sas_device->sas_address_parent)) {
+			_scsih_sas_device_remove(ioc, sas_device);
+			sas_device_put(sas_device);
 			continue;
 		} else if (!sas_device->starget) {
 			if (!ioc->is_driver_loading) {
 				mpt2sas_transport_port_remove(ioc,
-					sas_device->sas_address,
-					sas_device->sas_address_parent);
-				list_del(&sas_device->list);
-				kfree(sas_device);
+						sas_device->sas_address,
+						sas_device->sas_address_parent);
+				_scsih_sas_device_remove(ioc, sas_device);
+				sas_device_put(sas_device);
 				continue;
 			}
 		}
-		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		list_move_tail(&sas_device->list, &ioc->sas_device_list);
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+		sas_device_make_active(ioc, sas_device);
+		sas_device_put(sas_device);
 	}
 }
 
@@ -8142,7 +8399,9 @@
 	ioc = shost_priv(shost);
 	memset(ioc, 0, sizeof(struct MPT2SAS_ADAPTER));
 	INIT_LIST_HEAD(&ioc->list);
+	spin_lock(&gioc_lock);
 	list_add_tail(&ioc->list, &mpt2sas_ioc_list);
+	spin_unlock(&gioc_lock);
 	ioc->shost = shost;
 	ioc->id = mpt_ids++;
 	sprintf(ioc->name, "%s%d", MPT2SAS_DRIVER_NAME, ioc->id);
@@ -8167,6 +8426,8 @@
 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
 	/* misc semaphores and spin locks */
 	mutex_init(&ioc->reset_in_progress_mutex);
+	/* initializing pci_access_mutex lock */
+	mutex_init(&ioc->pci_access_mutex);
 	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
 	spin_lock_init(&ioc->scsi_lookup_lock);
 	spin_lock_init(&ioc->sas_device_lock);
@@ -8269,7 +8530,9 @@
  out_attach_fail:
 	destroy_workqueue(ioc->firmware_event_thread);
  out_thread_fail:
+	spin_lock(&gioc_lock);
 	list_del(&ioc->list);
+	spin_unlock(&gioc_lock);
 	scsi_host_put(shost);
 	return rv;
 }
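The gioc_lock additions above, around the list_del() in the detach path and the list_add_tail() in the attach path (plus its error exit), close a race on the global mpt2sas_ioc_list: an adapter is now only added to or removed from that list with gioc_lock held, so any other context walking the list sees a stable view. The pci_access_mutex initialised alongside it presumably serialises user-space access paths against PCI teardown; its users are elsewhere in this series. A hypothetical walker, purely to show the intended locking (the helper is made up for the example; gioc_lock, mpt2sas_ioc_list and the ioc fields are the driver's):

    /* Hypothetical example: find an adapter by id while holding gioc_lock. */
    static struct MPT2SAS_ADAPTER *example_find_ioc_by_id(int id)
    {
    	struct MPT2SAS_ADAPTER *ioc, *found = NULL;

    	spin_lock(&gioc_lock);
    	list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
    		if (ioc->id == id) {
    			found = ioc;
    			break;
    		}
    	}
    	spin_unlock(&gioc_lock);

    	return found;
    }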
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index ff2500a..af86800 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1323,15 +1323,17 @@
 	int rc;
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+	sas_device = __mpt2sas_get_sdev_by_addr(ioc,
 	    rphy->identify.sas_address);
 	if (sas_device) {
 		*identifier = sas_device->enclosure_logical_id;
 		rc = 0;
+		sas_device_put(sas_device);
 	} else {
 		*identifier = 0;
 		rc = -ENXIO;
 	}
+
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	return rc;
 }
@@ -1351,12 +1353,14 @@
 	int rc;
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+	sas_device = __mpt2sas_get_sdev_by_addr(ioc,
 	    rphy->identify.sas_address);
-	if (sas_device)
+	if (sas_device) {
 		rc = sas_device->slot;
-	else
+		sas_device_put(sas_device);
+	} else {
 		rc = -ENXIO;
+	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	return rc;
 }
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index c34c115..ec27ad2 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
  *                 scatter/gather formats.
  * Creation Date:  June 21, 2006
  *
- * mpi2.h Version:  02.00.31
+ * mpi2.h Version:  02.00.35
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -88,6 +88,10 @@
  *                     Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
  * 04-09-13  02.00.30  Bumped MPI2_HEADER_VERSION_UNIT.
  * 04-17-13  02.00.31  Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-19-13  02.00.32  Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-05-13  02.00.33  Bumped MPI2_HEADER_VERSION_UNIT.
+ * 01-08-14  02.00.34  Bumped MPI2_HEADER_VERSION_UNIT
+ * 06-13-14  02.00.35  Bumped MPI2_HEADER_VERSION_UNIT.
  * --------------------------------------------------------------------------
  */
 
@@ -121,7 +125,7 @@
 #define MPI2_VERSION_02_05                  (0x0205)
 
 /*Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x1F)
+#define MPI2_HEADER_VERSION_UNIT            (0x23)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index e261a31..581fdb3 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
  *         Title:  MPI Configuration messages and pages
  * Creation Date:  November 10, 2006
  *
- *   mpi2_cnfg.h Version:  02.00.26
+ *   mpi2_cnfg.h Version:  02.00.29
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -165,6 +165,20 @@
  *                     match the specification.
  * 08-19-13  02.00.26  Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for
  *			future use.
+ * 12-05-13  02.00.27  Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for
+ *		       MPI2_CONFIG_PAGE_MAN_7.
+ *		       Added EnclosureLevel and ConnectorName fields to
+ *		       MPI2_CONFIG_PAGE_SAS_DEV_0.
+ *		       Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for
+ *		       MPI2_CONFIG_PAGE_SAS_DEV_0.
+ *		       Added EnclosureLevel field to
+ *		       MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ *		       Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for
+ *		       MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 01-08-14  02.00.28  Added more defines for the BiosOptions field of
+ *		       MPI2_CONFIG_PAGE_BIOS_1.
+ * 06-13-14  02.00.29  Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and
+ *		       more defines for the BiosOptions field.
  * --------------------------------------------------------------------------
  */
 
@@ -724,6 +738,7 @@
 #define MPI2_MANUFACTURING7_PAGEVERSION                 (0x01)
 
 /*defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL         (0x00000008)
 #define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER       (0x00000002)
 #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO                (0x00000001)
 
@@ -1311,7 +1326,9 @@
 	MPI2_CONFIG_PAGE_HEADER Header;                     /*0x00 */
 	U32                     BiosOptions;                /*0x04 */
 	U32                     IOCSettings;                /*0x08 */
-	U32                     Reserved1;                  /*0x0C */
+	U8                      SSUTimeout;                 /*0x0C */
+	U8                      Reserved1;                  /*0x0D */
+	U16                     Reserved2;                  /*0x0E */
 	U32                     DeviceSettings;             /*0x10 */
 	U16                     NumberOfDevices;            /*0x14 */
 	U16                     UEFIVersion;                /*0x16 */
@@ -1323,9 +1340,24 @@
 	*PTR_MPI2_CONFIG_PAGE_BIOS_1,
 	Mpi2BiosPage1_t, *pMpi2BiosPage1_t;
 
-#define MPI2_BIOSPAGE1_PAGEVERSION                      (0x05)
+#define MPI2_BIOSPAGE1_PAGEVERSION                      (0x07)
 
 /*values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK                         (0x00003800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL                        (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE                   (0x00000800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID                        (0x00001000)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS                        (0x00001800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY                        (0x00002000)
+
+#define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS		(0x00000400)
+
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD	(0x00000300)
+#define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD	(0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD	(0x00000100)
+#define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD	(0x00000200)
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD	(0x00000300)
+
 #define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID                  (0x000000F0)
 #define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID                   (0x00000000)
 
@@ -2633,9 +2665,9 @@
 	U8
 		ControlGroup;           /*0x2E */
 	U8
-		Reserved1;              /*0x2F */
+		EnclosureLevel;		/*0x2F */
 	U32
-		Reserved2;              /*0x30 */
+		ConnectorName[4];	/*0x30 */
 	U32
 		Reserved3;              /*0x34 */
 } MPI2_CONFIG_PAGE_SAS_DEV_0,
@@ -2643,7 +2675,7 @@
 	Mpi2SasDevicePage0_t,
 	*pMpi2SasDevicePage0_t;
 
-#define MPI2_SASDEVICE0_PAGEVERSION         (0x08)
+#define MPI2_SASDEVICE0_PAGEVERSION         (0x09)
 
 /*values for SAS Device Page 0 AccessStatus field */
 #define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS                  (0x00)
@@ -2683,6 +2715,7 @@
 #define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED           (0x0020)
 #define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED           (0x0010)
 #define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH         (0x0008)
+#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID             (0x0002)
 #define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT               (0x0001)
 
 
@@ -3019,8 +3052,10 @@
 		NumSlots;                   /*0x18 */
 	U16
 		StartSlot;                  /*0x1A */
-	U16
+	U8
 		Reserved2;                  /*0x1C */
+	U8
+		EnclosureLevel;		    /*0x1D */
 	U16
 		SEPDevHandle;               /*0x1E */
 	U32
@@ -3031,9 +3066,10 @@
 	*PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
 	Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t;
 
-#define MPI2_SASENCLOSURE0_PAGEVERSION      (0x03)
+#define MPI2_SASENCLOSURE0_PAGEVERSION      (0x04)
 
 /*values for SAS Enclosure Page 0 Flags field */
+#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID      (0x0010)
 #define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK              (0x000F)
 #define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN           (0x0000)
 #define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES           (0x0001)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 4908309..d7598cc 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
  *         Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  * Creation Date:  October 11, 2006
  *
- * mpi2_ioc.h Version:  02.00.23
+ * mpi2_ioc.h Version:  02.00.24
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -132,6 +132,7 @@
  *			Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
  *			Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
  *			Added Encrypted Hash Extended Image.
+ * 12-05-13  02.00.24  Added MPI25_HASH_IMAGE_TYPE_BIOS.
  * --------------------------------------------------------------------------
  */
 
@@ -1598,6 +1599,7 @@
 /* values for HashImageType */
 #define MPI25_HASH_IMAGE_TYPE_UNUSED		(0x00)
 #define MPI25_HASH_IMAGE_TYPE_FIRMWARE		(0x01)
+#define MPI25_HASH_IMAGE_TYPE_BIOS              (0x02)
 
 /* values for HashAlgorithm */
 #define MPI25_HASH_ALGORITHM_UNUSED		(0x00)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 904910d..1629e5b 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
  *         Title:  MPI diagnostic tool structures and definitions
  * Creation Date:  March 26, 2007
  *
- *   mpi2_tool.h Version:  02.00.11
+ *   mpi2_tool.h Version:  02.00.12
  *
  * Version History
  * ---------------
@@ -33,6 +33,7 @@
  * 07-26-12  02.00.10  Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
  *                     it uses MPI Chain SGE as well as MPI Simple SGE.
  * 08-19-13  02.00.11  Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
+ * 01-08-14  02.00.12  Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC.
  * --------------------------------------------------------------------------
  */
 
@@ -100,6 +101,7 @@
 #define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES      (0x20000000)
 #define MPI2_TOOLBOX_CLEAN_FW_CURRENT               (0x10000000)
 #define MPI2_TOOLBOX_CLEAN_FW_BACKUP                (0x08000000)
+#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC   (0x04000000)
 #define MPI2_TOOLBOX_CLEAN_MEGARAID                 (0x02000000)
 #define MPI2_TOOLBOX_CLEAN_INITIALIZATION           (0x01000000)
 #define MPI2_TOOLBOX_CLEAN_FLASH                    (0x00000004)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 43f87e9..d4f1dcd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -83,10 +83,10 @@
 module_param(msix_disable, int, 0);
 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
 
-static int max_msix_vectors = 8;
+static int max_msix_vectors = -1;
 module_param(max_msix_vectors, int, 0);
 MODULE_PARM_DESC(max_msix_vectors,
-	" max msix vectors - (default=8)");
+	" max msix vectors");
 
 static int mpt3sas_fwfault_debug;
 MODULE_PARM_DESC(mpt3sas_fwfault_debug,
@@ -1009,8 +1009,30 @@
 	}
 
 	wmb();
-	writel(reply_q->reply_post_host_index | (msix_index <<
-	    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
+
+	/* Update Reply Post Host Index.
+	 * For HBAs that support the combined reply queue feature:
+	 * 1. Select the correct Supplemental Reply Post Host Index Register,
+	 *    i.e. the (msix_index / 8)th entry of the register address bank
+	 *    replyPostRegisterIndex[].
+	 * 2. Write the new reply host index into its ReplyPostIndex field and
+	 *    (msix_index modulo 8) into its MSIxIndex field, since each
+	 *    Supplemental Reply Post Host Index Register serves 8 MSI-X
+	 *    vectors.
+	 *
+	 * For other HBAs just update the Reply Post Host Index register with
+	 * the new reply host index in the ReplyPostIndex field and msix_index
+	 * in the MSIxIndex field.
+	 */
+	if (ioc->msix96_vector)
+		writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
+			MPI2_RPHI_MSIX_INDEX_SHIFT),
+			ioc->replyPostRegisterIndex[msix_index/8]);
+	else
+		writel(reply_q->reply_post_host_index | (msix_index <<
+			MPI2_RPHI_MSIX_INDEX_SHIFT),
+			&ioc->chip->ReplyPostHostIndex);
 	atomic_dec(&reply_q->busy);
 	return IRQ_HANDLED;
 }
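With the combined reply queue, each Supplemental Reply Post Host Index Register serves eight MSI-X vectors: vector n writes its host index to bank entry n / 8 and encodes n % 8 in the MSIxIndex field. Vector 21, for instance, lands in replyPostRegisterIndex[2] with MSIxIndex 5, and 96 vectors need the 12-entry bank sized by MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT in mpt3sas_base.h further down. A trivial sketch of that mapping (the helper name is illustrative, not part of the driver):

    /* Illustrative only: map an MSI-X vector to its supplemental register slot. */
    static void example_combined_rphi_slot(u8 msix_index, u8 *bank_entry, u8 *msix_field)
    {
    	*bank_entry = msix_index / 8;	/* index into replyPostRegisterIndex[] */
    	*msix_field = msix_index & 7;	/* value written to the MSIxIndex field */
    }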
@@ -1338,7 +1360,7 @@
 
 	sg_scmd = scsi_sglist(scmd);
 	sges_left = scsi_dma_map(scmd);
-	if (!sges_left) {
+	if (sges_left < 0) {
 		sdev_printk(KERN_ERR, scmd->device,
 			"pci_map_sg failed: request for %d bytes!\n",
 			scsi_bufflen(scmd));
@@ -1407,7 +1429,7 @@
  fill_in_last_segment:
 
 	/* fill the last segment */
-	while (sges_left) {
+	while (sges_left > 0) {
 		if (sges_left == 1)
 			_base_add_sg_single_ieee(sg_local,
 			    simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
@@ -1560,8 +1582,6 @@
 
 	pci_read_config_word(ioc->pdev, base + 2, &message_control);
 	ioc->msix_vector_count = (message_control & 0x3FF) + 1;
-	if (ioc->msix_vector_count > 8)
-		ioc->msix_vector_count = 8;
 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
 		"msix is supported, vector_count(%d)\n",
 		ioc->name, ioc->msix_vector_count));
@@ -1793,6 +1813,36 @@
 }
 
 /**
+ * mpt3sas_base_unmap_resources - free controller resources
+ * @ioc: per adapter object
+ */
+void
+mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
+{
+	struct pci_dev *pdev = ioc->pdev;
+
+	dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
+		ioc->name, __func__));
+
+	_base_free_irq(ioc);
+	_base_disable_msix(ioc);
+
+	if (ioc->msix96_vector)
+		kfree(ioc->replyPostRegisterIndex);
+
+	if (ioc->chip_phys) {
+		iounmap(ioc->chip);
+		ioc->chip_phys = 0;
+	}
+
+	if (pci_is_enabled(pdev)) {
+		pci_release_selected_regions(ioc->pdev, ioc->bars);
+		pci_disable_pcie_error_reporting(pdev);
+		pci_disable_device(pdev);
+	}
+}
+
+/**
  * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
  * @ioc: per adapter object
  *
@@ -1882,6 +1932,36 @@
 	if (r)
 		goto out_fail;
 
+	/* Use the combined reply queue feature only for SAS3 C0 and higher
+	 * revision HBAs, and only when the reply queue count is greater than 8.
+	 */
+	if (ioc->msix96_vector && ioc->reply_queue_count > 8) {
+		/* Determine the Supplemental Reply Post Host Index Register
+		 * addresses. They start at offset
+		 * MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, and each register is
+		 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes after the
+		 * previous one.
+		 */
+		ioc->replyPostRegisterIndex = kcalloc(
+		     MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT,
+		     sizeof(resource_size_t *), GFP_KERNEL);
+		if (!ioc->replyPostRegisterIndex) {
+			dfailprintk(ioc, printk(MPT3SAS_FMT
+			"allocation for reply Post Register Index failed!!!\n",
+								   ioc->name));
+			r = -ENOMEM;
+			goto out_fail;
+		}
+
+		for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) {
+			ioc->replyPostRegisterIndex[i] = (resource_size_t *)
+			     ((u8 *)&ioc->chip->Doorbell +
+			     MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
+			     (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
+		}
+	} else
+		ioc->msix96_vector = 0;
+
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
 		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
 		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
@@ -1897,12 +1977,7 @@
 	return 0;
 
  out_fail:
-	if (ioc->chip_phys)
-		iounmap(ioc->chip);
-	ioc->chip_phys = 0;
-	pci_release_selected_regions(ioc->pdev, ioc->bars);
-	pci_disable_pcie_error_reporting(pdev);
-	pci_disable_device(pdev);
+	mpt3sas_base_unmap_resources(ioc);
 	return r;
 }
 
@@ -2292,6 +2367,99 @@
 
 
 /**
+ * _base_display_dell_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_dell_branding(struct MPT3SAS_ADAPTER *ioc)
+{
+	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL)
+		return;
+
+	switch (ioc->pdev->device) {
+	case MPI25_MFGPAGE_DEVID_SAS3008:
+		switch (ioc->pdev->subsystem_device) {
+		case MPT3SAS_DELL_12G_HBA_SSDID:
+			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				MPT3SAS_DELL_12G_HBA_BRANDING);
+			break;
+		default:
+			pr_info(MPT3SAS_FMT
+			   "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", ioc->name,
+			   ioc->pdev->subsystem_device);
+			break;
+		}
+		break;
+	default:
+		pr_info(MPT3SAS_FMT
+			"Dell 12Gbps HBA: Subsystem ID: 0x%X\n", ioc->name,
+			ioc->pdev->subsystem_device);
+		break;
+	}
+}
+
+/**
+ * _base_display_cisco_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_cisco_branding(struct MPT3SAS_ADAPTER *ioc)
+{
+	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_CISCO)
+		return;
+
+	switch (ioc->pdev->device) {
+	case MPI25_MFGPAGE_DEVID_SAS3008:
+		switch (ioc->pdev->subsystem_device) {
+		case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
+			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
+			break;
+		case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
+			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
+			break;
+		case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
+			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+			break;
+		default:
+			pr_info(MPT3SAS_FMT
+			  "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+			  ioc->name, ioc->pdev->subsystem_device);
+			break;
+		}
+		break;
+	case MPI25_MFGPAGE_DEVID_SAS3108_1:
+		switch (ioc->pdev->subsystem_device) {
+		case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
+			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+			MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+			break;
+		case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
+			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+			MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
+			break;
+		default:
+			pr_info(MPT3SAS_FMT
+			 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+			 ioc->name, ioc->pdev->subsystem_device);
+			break;
+		}
+		break;
+	default:
+		 pr_info(MPT3SAS_FMT
+			"Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+			ioc->name, ioc->pdev->subsystem_device);
+		break;
+	}
+}
+
+/**
  * _base_display_ioc_capabilities - Disply IOC's capabilities.
  * @ioc: per adapter object
  *
@@ -2321,6 +2489,8 @@
 	    bios_version & 0x000000FF);
 
 	_base_display_intel_branding(ioc);
+	_base_display_dell_branding(ioc);
+	_base_display_cisco_branding(ioc);
 
 	pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
 
@@ -3139,6 +3309,9 @@
  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
  */
 static int
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
+
+static int
 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
 	int sleep_flag)
 {
@@ -3681,6 +3854,64 @@
 }
 
 /**
+ * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
+	int sleep_flag)
+{
+	u32 ioc_state;
+	int rc;
+
+	dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
+	    __func__));
+
+	if (ioc->pci_error_recovery) {
+		dfailprintk(ioc, printk(MPT3SAS_FMT
+		    "%s: host in pci error recovery\n", ioc->name, __func__));
+		return -EFAULT;
+	}
+
+	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+	dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
+	    ioc->name, __func__, ioc_state));
+
+	if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
+	    (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
+		return 0;
+
+	if (ioc_state & MPI2_DOORBELL_USED) {
+		dhsprintk(ioc, printk(MPT3SAS_FMT
+		    "unexpected doorbell active!\n", ioc->name));
+		goto issue_diag_reset;
+	}
+
+	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+		mpt3sas_base_fault_info(ioc, ioc_state &
+		    MPI2_DOORBELL_DATA_MASK);
+		goto issue_diag_reset;
+	}
+
+	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
+	    timeout, sleep_flag);
+	if (ioc_state) {
+		dfailprintk(ioc, printk(MPT3SAS_FMT
+		    "%s: failed going to ready state (ioc_state=0x%x)\n",
+		    ioc->name, __func__, ioc_state));
+		return -EFAULT;
+	}
+
+ issue_diag_reset:
+	rc = _base_diag_reset(ioc, sleep_flag);
+	return rc;
+}
+
+/**
  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
  * @ioc: per adapter object
  * @sleep_flag: CAN_SLEEP or NO_SLEEP
@@ -3698,6 +3929,13 @@
 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
 	    __func__));
 
+	r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
+	if (r) {
+		dfailprintk(ioc, printk(MPT3SAS_FMT
+		    "%s: failed getting to correct state\n",
+		    ioc->name, __func__));
+		return r;
+	}
 	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
 	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
 	memset(&mpi_request, 0, mpi_request_sz);
@@ -3783,7 +4021,7 @@
 	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
 	mpi_request.VF_ID = 0; /* TODO */
 	mpi_request.VP_ID = 0;
-	mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
+	mpi_request.MsgVersion = cpu_to_le16(MPI25_VERSION);
 	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
 
 	if (_base_is_controller_msix_enabled(ioc))
@@ -4524,8 +4762,15 @@
 
 	/* initialize reply post host index */
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
-		writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
-		    &ioc->chip->ReplyPostHostIndex);
+		if (ioc->msix96_vector)
+			writel((reply_q->msix_index & 7)<<
+			   MPI2_RPHI_MSIX_INDEX_SHIFT,
+			   ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
+		else
+			writel(reply_q->msix_index <<
+				MPI2_RPHI_MSIX_INDEX_SHIFT,
+				&ioc->chip->ReplyPostHostIndex);
+
 		if (!_base_is_controller_msix_enabled(ioc))
 			goto skip_init_reply_post_host_index;
 	}
@@ -4564,8 +4809,6 @@
 void
 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
 {
-	struct pci_dev *pdev = ioc->pdev;
-
 	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
 	    __func__));
 
@@ -4576,18 +4819,7 @@
 		ioc->shost_recovery = 0;
 	}
 
-	_base_free_irq(ioc);
-	_base_disable_msix(ioc);
-
-	if (ioc->chip_phys && ioc->chip)
-		iounmap(ioc->chip);
-	ioc->chip_phys = 0;
-
-	if (pci_is_enabled(pdev)) {
-		pci_release_selected_regions(ioc->pdev, ioc->bars);
-		pci_disable_pcie_error_reporting(pdev);
-		pci_disable_device(pdev);
-	}
+	mpt3sas_base_unmap_resources(ioc);
 	return;
 }
 
@@ -4602,6 +4834,7 @@
 {
 	int r, i;
 	int cpu_id, last_cpu_id = 0;
+	u8 revision;
 
 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
 	    __func__));
@@ -4621,6 +4854,20 @@
 		goto out_free_resources;
 	}
 
+	/* Check whether the controller revision is C0 or above.
+	 * Only C0 and above revision controllers support 96 MSI-X vectors.
+	 */
+	revision = ioc->pdev->revision;
+
+	if ((ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3004 ||
+	     ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3008 ||
+	     ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_1 ||
+	     ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_2 ||
+	     ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_5 ||
+	     ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_6) &&
+	     (revision >= 0x02))
+		ioc->msix96_vector = 1;
+
 	ioc->rdpq_array_enable_assigned = 0;
 	ioc->dma_mask = 0;
 	r = mpt3sas_base_map_resources(ioc);
@@ -4643,7 +4890,6 @@
 	ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
 	ioc->build_sg = &_base_build_sg_ieee;
 	ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
-	ioc->mpi25 = 1;
 	ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
 
 	/*
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index afa8816..f0e462b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -71,8 +71,8 @@
 #define MPT3SAS_DRIVER_NAME		"mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION	"LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION		"04.100.00.00"
-#define MPT3SAS_MAJOR_VERSION		4
+#define MPT3SAS_DRIVER_VERSION		"09.100.00.00"
+#define MPT3SAS_MAJOR_VERSION		9
 #define MPT3SAS_MINOR_VERSION		100
 #define MPT3SAS_BUILD_VERSION		0
 #define MPT3SAS_RELEASE_VERSION	00
@@ -152,12 +152,49 @@
 #define MPT3SAS_INTEL_RS3UC080_SSDID    0x3524
 
 /*
+ * Dell HBA branding
+ */
+#define MPT3SAS_DELL_12G_HBA_BRANDING       \
+	"Dell 12Gbps HBA"
+
+/*
+ * Dell HBA SSDIDs
+ */
+#define MPT3SAS_DELL_12G_HBA_SSDID	0x1F46
+
+/*
+ * Cisco HBA branding
+ */
+#define MPT3SAS_CISCO_12G_8E_HBA_BRANDING		\
+		"Cisco 9300-8E 12G SAS HBA"
+#define MPT3SAS_CISCO_12G_8I_HBA_BRANDING		\
+		"Cisco 9300-8i 12G SAS HBA"
+#define MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING	\
+		"Cisco 12G Modular SAS Pass through Controller"
+#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING		\
+		"UCS C3X60 12G SAS Pass through Controller"
+/*
+ * Cisco HBA SSSDIDs
+ */
+#define MPT3SAS_CISCO_12G_8E_HBA_SSDID  0x14C
+#define MPT3SAS_CISCO_12G_8I_HBA_SSDID  0x154
+#define MPT3SAS_CISCO_12G_AVILA_HBA_SSDID  0x155
+#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID  0x156
+
+/*
  * status bits for ioc->diag_buffer_status
  */
 #define MPT3_DIAG_BUFFER_IS_REGISTERED	(0x01)
 #define MPT3_DIAG_BUFFER_IS_RELEASED	(0x02)
 #define MPT3_DIAG_BUFFER_IS_DIAG_RESET	(0x04)
 
+/*
+ * Combined Reply Queue constants,
+ * There are twelve Supplemental Reply Post Host Index Registers
+ * and each register is at offset 0x10 bytes from the previous one.
+ */
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT 12
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
 
 /* OEM Identifiers */
 #define MFG10_OEM_ID_INVALID                   (0x00000000)
@@ -173,6 +210,8 @@
 #define MFG10_GF0_SSD_DATA_SCRUB_DISABLE       (0x00000008)
 #define MFG10_GF0_SINGLE_DRIVE_R0              (0x00000010)
 
+#define VIRTUAL_IO_FAILED_RETRY			(0x32010081)
+
 /* OEM Specific Flags will come from OEM specific header files */
 struct Mpi2ManufacturingPage10_t {
 	MPI2_CONFIG_PAGE_HEADER	Header;		/* 00h */
@@ -294,7 +333,8 @@
  * @responding: used in _scsih_sas_device_mark_responding
  * @fast_path: fast path feature enable bit
  * @pfa_led_on: flag for PFA LED status
- *
+ * @pend_sas_rphy_add: flag to check if device is in sas_rphy_add()
+ *	addition routine.
  */
 struct _sas_device {
 	struct list_head list;
@@ -315,6 +355,9 @@
 	u8	responding;
 	u8	fast_path;
 	u8	pfa_led_on;
+	u8	pend_sas_rphy_add;
+	u8	enclosure_level;
+	u8	connector_name[4];
 };
 
 /**
@@ -728,7 +771,8 @@
  *				is assigned only ones
  * @reply_queue_count: number of reply queue's
  * @reply_queue_list: link list contaning the reply queue info
- * @reply_post_host_index: head index in the pool where FW completes IO
+ * @msix96_vector: 96 MSI-X vector support
+ * @replyPostRegisterIndex: addresses of the Supplemental Reply Post Host Index Registers
  * @delayed_tr_list: target reset link list
  * @delayed_tr_volume_list: volume target reset link list
  * @@temp_sensors_count: flag to carry the number of temperature sensors
@@ -814,7 +858,6 @@
 	MPT_BUILD_SG_SCMD build_sg_scmd;
 	MPT_BUILD_SG    build_sg;
 	MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge;
-	u8              mpi25;
 	u16             sge_size_ieee;
 
 	/* function ptr for MPI sg elements only */
@@ -937,6 +980,10 @@
 	u8		reply_queue_count;
 	struct list_head reply_queue_list;
 
+	u8		msix96_vector;
+	/* reply post register index */
+	resource_size_t	**replyPostRegisterIndex;
+
 	struct list_head delayed_tr_list;
 	struct list_head delayed_tr_volume_list;
 	u8		temp_sensors_count;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 5a97e32..8ccef38 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -585,6 +585,22 @@
 
 	if (!sas_device)
 		return;
+	pr_info(MPT3SAS_FMT
+	    "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+	    ioc->name, sas_device->handle,
+	    (unsigned long long) sas_device->sas_address);
+
+	if (sas_device->enclosure_handle != 0)
+		pr_info(MPT3SAS_FMT
+		   "removing enclosure logical id(0x%016llx), slot(%d)\n",
+		   ioc->name, (unsigned long long)
+		   sas_device->enclosure_logical_id, sas_device->slot);
+
+	if (sas_device->connector_name[0] != '\0')
+		pr_info(MPT3SAS_FMT
+		   "removing enclosure level(0x%04x), connector name( %s)\n",
+		   ioc->name, sas_device->enclosure_level,
+		   sas_device->connector_name);
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	list_del(&sas_device->list);
@@ -663,6 +679,18 @@
 		ioc->name, __func__, sas_device->handle,
 		(unsigned long long)sas_device->sas_address));
 
+	if (sas_device->enclosure_handle != 0)
+		dewtprintk(ioc, pr_info(MPT3SAS_FMT
+		    "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+		    ioc->name, __func__, (unsigned long long)
+		    sas_device->enclosure_logical_id, sas_device->slot));
+
+	if (sas_device->connector_name[0] != '\0')
+		dewtprintk(ioc, pr_info(MPT3SAS_FMT
+		    "%s: enclosure level(0x%04x), connector name( %s)\n",
+		    ioc->name, __func__,
+		    sas_device->enclosure_level, sas_device->connector_name));
+
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -704,6 +732,18 @@
 		__func__, sas_device->handle,
 		(unsigned long long)sas_device->sas_address));
 
+	if (sas_device->enclosure_handle != 0)
+		dewtprintk(ioc, pr_info(MPT3SAS_FMT
+		    "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+		    ioc->name, __func__, (unsigned long long)
+		    sas_device->enclosure_logical_id, sas_device->slot));
+
+	if (sas_device->connector_name[0] != '\0')
+		dewtprintk(ioc, pr_info(MPT3SAS_FMT
+		    "%s: enclosure level(0x%04x), connector name( %s)\n",
+		    ioc->name, __func__, sas_device->enclosure_level,
+		    sas_device->connector_name));
+
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
 	_scsih_determine_boot_device(ioc, sas_device, 0);
@@ -1772,10 +1812,16 @@
 	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
 	    ds, handle, (unsigned long long)sas_device->sas_address,
 	    sas_device->phy, (unsigned long long)sas_device->device_name);
-	sdev_printk(KERN_INFO, sdev,
-		"%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
-		ds, (unsigned long long)
-	    sas_device->enclosure_logical_id, sas_device->slot);
+	if (sas_device->enclosure_handle != 0)
+		sdev_printk(KERN_INFO, sdev,
+		     "%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
+		     ds, (unsigned long long)
+		     sas_device->enclosure_logical_id, sas_device->slot);
+	if (sas_device->connector_name[0] != '\0')
+		sdev_printk(KERN_INFO, sdev,
+		     "%s: enclosure level(0x%04x), connector name( %s)\n",
+		     ds, sas_device->enclosure_level,
+		     sas_device->connector_name);
 
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
@@ -2189,10 +2235,17 @@
 			    sas_device->handle,
 			    (unsigned long long)sas_device->sas_address,
 			    sas_device->phy);
-			starget_printk(KERN_INFO, starget,
-			    "enclosure_logical_id(0x%016llx), slot(%d)\n",
-			   (unsigned long long)sas_device->enclosure_logical_id,
-			    sas_device->slot);
+			if (sas_device->enclosure_handle != 0)
+				starget_printk(KERN_INFO, starget,
+				 "enclosure_logical_id(0x%016llx), slot(%d)\n",
+				 (unsigned long long)
+				 sas_device->enclosure_logical_id,
+				 sas_device->slot);
+			if (sas_device->connector_name[0] != '\0')
+				starget_printk(KERN_INFO, starget,
+				"enclosure level(0x%04x),connector name(%s)\n",
+				 sas_device->enclosure_level,
+				 sas_device->connector_name);
 		}
 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	}
@@ -2552,6 +2605,75 @@
 }
 
 /**
+ * _scsih_internal_device_block - block the sdev device
+ * @sdev: per device object
+ * @sas_device_priv_data : per device driver private data
+ *
+ * Make sure the device is blocked without error; if it is not,
+ * print an error.
+ */
+static void
+_scsih_internal_device_block(struct scsi_device *sdev,
+			struct MPT3SAS_DEVICE *sas_device_priv_data)
+{
+	int r = 0;
+
+	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
+	    sas_device_priv_data->sas_target->handle);
+	sas_device_priv_data->block = 1;
+
+	r = scsi_internal_device_block(sdev);
+	if (r == -EINVAL)
+		sdev_printk(KERN_WARNING, sdev,
+		    "device_block failed with return(%d) for handle(0x%04x)\n",
+		    r, sas_device_priv_data->sas_target->handle);
+}
+
+/**
+ * _scsih_internal_device_unblock - unblock the sdev device
+ * @sdev: per device object
+ * @sas_device_priv_data : per device driver private data
+ * Make sure the device is unblocked without error; if not, retry
+ * by blocking and then unblocking.
+ */
+
+static void
+_scsih_internal_device_unblock(struct scsi_device *sdev,
+			struct MPT3SAS_DEVICE *sas_device_priv_data)
+{
+	int r = 0;
+
+	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
+	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
+	sas_device_priv_data->block = 0;
+	r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+	if (r == -EINVAL) {
+		/* The device has been set to SDEV_RUNNING by SD layer during
+		 * device addition but the request queue is still stopped by
+		 * our earlier block call. We need to perform a block again
+		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
+
+		sdev_printk(KERN_WARNING, sdev,
+		    "device_unblock failed with return(%d) for handle(0x%04x) "
+		    "performing a block followed by an unblock\n",
+		    r, sas_device_priv_data->sas_target->handle);
+		sas_device_priv_data->block = 1;
+		r = scsi_internal_device_block(sdev);
+		if (r)
+			sdev_printk(KERN_WARNING, sdev, "retried device_block "
+			    "failed with return(%d) for handle(0x%04x)\n",
+			    r, sas_device_priv_data->sas_target->handle);
+
+		sas_device_priv_data->block = 0;
+		r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+		if (r)
+			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
+			    " failed with return(%d) for handle(0x%04x)\n",
+			    r, sas_device_priv_data->sas_target->handle);
+	}
+}
+
+/**
  * _scsih_ublock_io_all_device - unblock every device
  * @ioc: per adapter object
  *
@@ -2570,11 +2692,10 @@
 		if (!sas_device_priv_data->block)
 			continue;
 
-		sas_device_priv_data->block = 0;
 		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
 			"device_running, handle(0x%04x)\n",
 		    sas_device_priv_data->sas_target->handle));
-		scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
 	}
 }
 
@@ -2599,10 +2720,9 @@
 		if (sas_device_priv_data->sas_target->sas_address
 		    != sas_address)
 			continue;
-		if (sas_device_priv_data->block) {
-			sas_device_priv_data->block = 0;
-			scsi_internal_device_unblock(sdev, SDEV_RUNNING);
-		}
+		if (sas_device_priv_data->block)
+			_scsih_internal_device_unblock(sdev,
+				sas_device_priv_data);
 	}
 }
 
@@ -2625,10 +2745,7 @@
 			continue;
 		if (sas_device_priv_data->block)
 			continue;
-		sas_device_priv_data->block = 1;
-		scsi_internal_device_block(sdev);
-		sdev_printk(KERN_INFO, sdev, "device_blocked, handle(0x%04x)\n",
-		    sas_device_priv_data->sas_target->handle);
+		_scsih_internal_device_block(sdev, sas_device_priv_data);
 	}
 }
 
@@ -2644,6 +2761,11 @@
 {
 	struct MPT3SAS_DEVICE *sas_device_priv_data;
 	struct scsi_device *sdev;
+	struct _sas_device *sas_device;
+
+	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+	if (!sas_device)
+		return;
 
 	shost_for_each_device(sdev, ioc->shost) {
 		sas_device_priv_data = sdev->hostdata;
@@ -2653,10 +2775,9 @@
 			continue;
 		if (sas_device_priv_data->block)
 			continue;
-		sas_device_priv_data->block = 1;
-		scsi_internal_device_block(sdev);
-		sdev_printk(KERN_INFO, sdev,
-			"device_blocked, handle(0x%04x)\n", handle);
+		if (sas_device->pend_sas_rphy_add)
+			continue;
+		_scsih_internal_device_block(sdev, sas_device_priv_data);
 	}
 }
 
@@ -2806,6 +2927,18 @@
 			"setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
 			ioc->name, handle,
 		    (unsigned long long)sas_address));
+		if (sas_device->enclosure_handle != 0)
+			dewtprintk(ioc, pr_info(MPT3SAS_FMT
+			 "setting delete flag:enclosure logical id(0x%016llx),"
+			 " slot(%d)\n", ioc->name, (unsigned long long)
+			  sas_device->enclosure_logical_id,
+			  sas_device->slot));
+		if (sas_device->connector_name[0] != '\0')
+			dewtprintk(ioc, pr_info(MPT3SAS_FMT
+			 "setting delete flag: enclosure level(0x%04x),"
+			 " connector name( %s)\n", ioc->name,
+			  sas_device->enclosure_level,
+			  sas_device->connector_name));
 		_scsih_ublock_io_device(ioc, sas_address);
 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
 	}
@@ -3821,10 +3954,19 @@
 				"\tsas_address(0x%016llx), phy(%d)\n",
 				ioc->name, (unsigned long long)
 			    sas_device->sas_address, sas_device->phy);
-			pr_warn(MPT3SAS_FMT
-			    "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
-			    ioc->name, (unsigned long long)
-			    sas_device->enclosure_logical_id, sas_device->slot);
+			if (sas_device->enclosure_handle != 0)
+				pr_warn(MPT3SAS_FMT
+				  "\tenclosure_logical_id(0x%016llx),"
+				  "slot(%d)\n", ioc->name,
+				  (unsigned long long)
+				  sas_device->enclosure_logical_id,
+				  sas_device->slot);
+			if (sas_device->connector_name[0])
+				pr_warn(MPT3SAS_FMT
+				  "\tenclosure level(0x%04x),"
+				  " connector name( %s)\n", ioc->name,
+				  sas_device->enclosure_level,
+				  sas_device->connector_name);
 		}
 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	}
@@ -3999,7 +4141,16 @@
 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 		return;
 	}
-	starget_printk(KERN_WARNING, starget, "predicted fault\n");
+	if (sas_device->enclosure_handle != 0)
+		starget_printk(KERN_WARNING, starget, "predicted fault, "
+			"enclosure logical id(0x%016llx), slot(%d)\n",
+			(unsigned long long)sas_device->enclosure_logical_id,
+			sas_device->slot);
+	if (sas_device->connector_name[0] != '\0')
+		starget_printk(KERN_WARNING, starget, "predicted fault, "
+			"enclosure level(0x%04x), connector name( %s)\n",
+			sas_device->enclosure_level,
+			sas_device->connector_name);
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
 	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
@@ -4119,8 +4270,15 @@
 			_scsih_smart_predicted_fault(ioc,
 			    le16_to_cpu(mpi_reply->DevHandle));
 		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
-	}
 
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+		if (!(ioc->logging_level & MPT_DEBUG_REPLY) &&
+		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
+		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
+		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
+			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
+#endif
+	}
 	switch (ioc_status) {
 	case MPI2_IOCSTATUS_BUSY:
 	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
@@ -4146,6 +4304,9 @@
 				scmd->device->expecting_cc_ua = 1;
 			}
 			break;
+		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
+			scmd->result = DID_RESET << 16;
+			break;
 		}
 		scmd->result = DID_SOFT_ERROR << 16;
 		break;
@@ -4788,6 +4949,16 @@
 			sas_device->handle, handle);
 		sas_target_priv_data->handle = handle;
 		sas_device->handle = handle;
+		if (sas_device_pg0.Flags &
+		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+			sas_device->enclosure_level =
+				le16_to_cpu(sas_device_pg0.EnclosureLevel);
+			memcpy(&sas_device->connector_name[0],
+				&sas_device_pg0.ConnectorName[0], 4);
+		} else {
+			sas_device->enclosure_level = 0;
+			sas_device->connector_name[0] = '\0';
+		}
 	}
 
 	/* check if device is present */
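The ENCL_LEVEL_VALID check, EnclosureLevel copy and four-byte ConnectorName copy above recur at several call sites in this patch; a minimal sketch (hypothetical helper, not part of the patch) of how the pattern could be factored:

/* Hypothetical helper mirroring the repeated pattern in this patch. */
static void
_scsih_copy_enclosure_level(struct _sas_device *sas_device,
			    Mpi2SasDevicePage0_t *sas_device_pg0)
{
	if (sas_device_pg0->Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
		sas_device->enclosure_level =
			le16_to_cpu(sas_device_pg0->EnclosureLevel);
		memcpy(sas_device->connector_name,
		       sas_device_pg0->ConnectorName, 4);
	} else {
		sas_device->enclosure_level = 0;
		sas_device->connector_name[0] = '\0';
	}
}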
@@ -4894,14 +5065,24 @@
 		    ioc->name, __FILE__, __LINE__, __func__);
 	sas_device->enclosure_handle =
 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
-	sas_device->slot =
-	    le16_to_cpu(sas_device_pg0.Slot);
+	if (sas_device->enclosure_handle != 0)
+		sas_device->slot =
+		    le16_to_cpu(sas_device_pg0.Slot);
 	sas_device->device_info = device_info;
 	sas_device->sas_address = sas_address;
 	sas_device->phy = sas_device_pg0.PhyNum;
 	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
 	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
 
+	if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+		sas_device->enclosure_level =
+			le16_to_cpu(sas_device_pg0.EnclosureLevel);
+		memcpy(&sas_device->connector_name[0],
+			&sas_device_pg0.ConnectorName[0], 4);
+	} else {
+		sas_device->enclosure_level = 0;
+		sas_device->connector_name[0] = '\0';
+	}
 	/* get enclosure_logical_id */
 	if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
 	   ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
@@ -4943,6 +5124,18 @@
 		ioc->name, __func__,
 	    sas_device->handle, (unsigned long long)
 	    sas_device->sas_address));
+	if (sas_device->enclosure_handle != 0)
+		dewtprintk(ioc, pr_info(MPT3SAS_FMT
+		    "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
+		    ioc->name, __func__,
+		    (unsigned long long)sas_device->enclosure_logical_id,
+		    sas_device->slot));
+	if (sas_device->connector_name[0] != '\0')
+		dewtprintk(ioc, pr_info(MPT3SAS_FMT
+		  "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
+		  ioc->name, __func__,
+		  sas_device->enclosure_level,
+		  sas_device->connector_name));
 
 	if (sas_device->starget && sas_device->starget->hostdata) {
 		sas_target_priv_data = sas_device->starget->hostdata;
@@ -4959,12 +5152,34 @@
 		"removing handle(0x%04x), sas_addr(0x%016llx)\n",
 		ioc->name, sas_device->handle,
 	    (unsigned long long) sas_device->sas_address);
+	if (sas_device->enclosure_handle != 0)
+		pr_info(MPT3SAS_FMT
+		  "removing : enclosure logical id(0x%016llx), slot(%d)\n",
+		  ioc->name,
+		  (unsigned long long)sas_device->enclosure_logical_id,
+		  sas_device->slot);
+	if (sas_device->connector_name[0] != '\0')
+		pr_info(MPT3SAS_FMT
+		  "removing enclosure level(0x%04x), connector name( %s)\n",
+		  ioc->name, sas_device->enclosure_level,
+		  sas_device->connector_name);
 
 	dewtprintk(ioc, pr_info(MPT3SAS_FMT
 		"%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
 		ioc->name, __func__,
-	    sas_device->handle, (unsigned long long)
-	    sas_device->sas_address));
+		sas_device->handle, (unsigned long long)
+		sas_device->sas_address));
+	if (sas_device->enclosure_handle != 0)
+		dewtprintk(ioc, pr_info(MPT3SAS_FMT
+		    "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
+		    ioc->name, __func__,
+		    (unsigned long long)sas_device->enclosure_logical_id,
+		    sas_device->slot));
+	if (sas_device->connector_name[0] != '\0')
+		dewtprintk(ioc, pr_info(MPT3SAS_FMT
+		    "%s: exit: enclosure level(0x%04x), connector name(%s)\n",
+		    ioc->name, __func__, sas_device->enclosure_level,
+		    sas_device->connector_name));
 
 	kfree(sas_device);
 }
@@ -6357,9 +6572,7 @@
 /**
  * _scsih_mark_responding_sas_device - mark a sas_devices as responding
  * @ioc: per adapter object
- * @sas_address: sas address
- * @slot: enclosure slot id
- * @handle: device handle
+ * @sas_device_pg0: SAS Device page 0
  *
  * After host reset, find out whether devices are still responding.
  * Used in _scsih_remove_unresponsive_sas_devices.
@@ -6367,8 +6580,8 @@
  * Return nothing.
  */
 static void
-_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
-	u16 slot, u16 handle)
+_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
+Mpi2SasDevicePage0_t *sas_device_pg0)
 {
 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
 	struct scsi_target *starget;
@@ -6377,8 +6590,8 @@
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
-		if (sas_device->sas_address == sas_address &&
-		    sas_device->slot == slot) {
+		if ((sas_device->sas_address == sas_device_pg0->SASAddress) &&
+			(sas_device->slot == sas_device_pg0->Slot)) {
 			sas_device->responding = 1;
 			starget = sas_device->starget;
 			if (starget && starget->hostdata) {
@@ -6387,22 +6600,40 @@
 				sas_target_priv_data->deleted = 0;
 			} else
 				sas_target_priv_data = NULL;
-			if (starget)
+			if (starget) {
 				starget_printk(KERN_INFO, starget,
-				    "handle(0x%04x), sas_addr(0x%016llx), "
-				    "enclosure logical id(0x%016llx), "
-				    "slot(%d)\n", handle,
-				    (unsigned long long)sas_device->sas_address,
+				    "handle(0x%04x), sas_addr(0x%016llx)\n",
+				    sas_device_pg0->DevHandle,
 				    (unsigned long long)
-				    sas_device->enclosure_logical_id,
-				    sas_device->slot);
-			if (sas_device->handle == handle)
+				    sas_device->sas_address);
+
+				if (sas_device->enclosure_handle != 0)
+					starget_printk(KERN_INFO, starget,
+					 "enclosure logical id(0x%016llx),"
+					 " slot(%d)\n",
+					 (unsigned long long)
+					 sas_device->enclosure_logical_id,
+					 sas_device->slot);
+			}
+			if (sas_device_pg0->Flags &
+			      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+				sas_device->enclosure_level =
+				   le16_to_cpu(sas_device_pg0->EnclosureLevel);
+				memcpy(&sas_device->connector_name[0],
+					&sas_device_pg0->ConnectorName[0], 4);
+			} else {
+				sas_device->enclosure_level = 0;
+				sas_device->connector_name[0] = '\0';
+			}
+
+			if (sas_device->handle == sas_device_pg0->DevHandle)
 				goto out;
 			pr_info("\thandle changed from(0x%04x)!!!\n",
 			    sas_device->handle);
-			sas_device->handle = handle;
+			sas_device->handle = sas_device_pg0->DevHandle;
 			if (sas_target_priv_data)
-				sas_target_priv_data->handle = handle;
+				sas_target_priv_data->handle =
+					sas_device_pg0->DevHandle;
 			goto out;
 		}
 	}
@@ -6441,13 +6672,15 @@
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
 			break;
-		handle = le16_to_cpu(sas_device_pg0.DevHandle);
+		handle = sas_device_pg0.DevHandle =
+				le16_to_cpu(sas_device_pg0.DevHandle);
 		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
 		if (!(_scsih_is_end_device(device_info)))
 			continue;
-		_scsih_mark_responding_sas_device(ioc,
-		    le64_to_cpu(sas_device_pg0.SASAddress),
-		    le16_to_cpu(sas_device_pg0.Slot), handle);
+		sas_device_pg0.SASAddress =
+				le64_to_cpu(sas_device_pg0.SASAddress);
+		sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot);
+		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
 	}
 
  out:
@@ -7854,8 +8087,8 @@
 	/* event thread */
 	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
 	    "fw_event%d", ioc->id);
-	ioc->firmware_event_thread = create_singlethread_workqueue(
-	    ioc->firmware_event_name);
+	ioc->firmware_event_thread = alloc_ordered_workqueue(
+	    ioc->firmware_event_name, WQ_MEM_RECLAIM);
 	if (!ioc->firmware_event_thread) {
 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
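The switch from create_singlethread_workqueue() to alloc_ordered_workqueue(..., WQ_MEM_RECLAIM) keeps the one-item-at-a-time ordering guarantee while adding a rescuer thread, so firmware-event processing can still make forward progress under memory pressure. A sketch of the equivalent printf-style form (the surrounding error handling is illustrative):

/* Equivalent idiom: ordered execution plus a rescuer thread. */
wq = alloc_ordered_workqueue("fw_event%d", WQ_MEM_RECLAIM, ioc->id);
if (!wq)
	return -ENOMEM;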
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index efb98af..70fd019 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -649,6 +649,7 @@
 	unsigned long flags;
 	struct _sas_node *sas_node;
 	struct sas_rphy *rphy;
+	struct _sas_device *sas_device = NULL;
 	int i;
 	struct sas_port *port;
 
@@ -731,10 +732,27 @@
 		    mpt3sas_port->remote_identify.device_type);
 
 	rphy->identify = mpt3sas_port->remote_identify;
+
+	if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+		sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+				    mpt3sas_port->remote_identify.sas_address);
+		if (!sas_device) {
+			dfailprintk(ioc, printk(MPT3SAS_FMT
+				"failure at %s:%d/%s()!\n",
+				ioc->name, __FILE__, __LINE__, __func__));
+			goto out_fail;
+		}
+		sas_device->pend_sas_rphy_add = 1;
+	}
+
 	if ((sas_rphy_add(rphy))) {
 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
 	}
+
+	if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE)
+		sas_device->pend_sas_rphy_add = 0;
+
 	if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
 		dev_printk(KERN_INFO, &rphy->dev,
 			"add: handle(0x%04x), sas_addr(0x%016llx)\n",
@@ -1946,7 +1964,7 @@
 	} else {
 		dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
 		    blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
-		if (!dma_addr_out) {
+		if (pci_dma_mapping_error(ioc->pdev, dma_addr_out)) {
 			pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n",
 			    ioc->name, __func__);
 			rc = -ENOMEM;
@@ -1968,7 +1986,7 @@
 	} else {
 		dma_addr_in =  pci_map_single(ioc->pdev, bio_data(rsp->bio),
 		    blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
-		if (!dma_addr_in) {
+		if (pci_dma_mapping_error(ioc->pdev, dma_addr_in)) {
 			pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n",
 			    ioc->name, __func__);
 			rc = -ENOMEM;
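The two hunks above replace NULL checks on the returned bus address with pci_dma_mapping_error(); a bus address of zero can be perfectly valid, so the dedicated helper is the only reliable test. A minimal sketch of the idiom (the wrapper function itself is hypothetical):

/* Sketch of the mapping-error idiom used above; not part of the patch. */
static int map_one_buffer(struct pci_dev *pdev, void *buf, size_t len,
			  dma_addr_t *out)
{
	dma_addr_t addr;

	addr = pci_map_single(pdev, buf, len, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, addr))
		return -ENOMEM;	/* addr must not be used or unmapped */

	*out = addr;	/* later released with pci_unmap_single(pdev, addr, len, ...) */
	return 0;
}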
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 39306b1..04e67a1 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -2642,6 +2642,7 @@
 		ts->resp = SAS_TASK_COMPLETE;
 		ts->stat = SAS_OPEN_REJECT;
 		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
 	default:
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("Unknown status 0x%x\n", status));
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 0e1628f..9a389f1 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2337,6 +2337,7 @@
 		ts->resp = SAS_TASK_COMPLETE;
 		ts->stat = SAS_OPEN_REJECT;
 		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
 	default:
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("Unknown status 0x%x\n", status));
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 33f60c9..a0f732b 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -32,10 +32,10 @@
 	They are also included in the linux-firmware tree as well.
 
 config TCM_QLA2XXX
-	tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
+	tristate "TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs"
 	depends on SCSI_QLA_FC && TARGET_CORE
 	depends on LIBFC
 	select BTREE
 	default n
 	---help---
-	Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs
+	Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 7ed7bae..ac65cb7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1359,9 +1359,7 @@
 	struct qla_hw_data *ha = tgt->ha;
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	struct se_session *se_sess;
-	struct se_node_acl *se_nacl;
 	struct tcm_qla2xxx_lport *lport;
-	struct tcm_qla2xxx_nacl *nacl;
 
 	BUG_ON(in_interrupt());
 
@@ -1371,8 +1369,6 @@
 		dump_stack();
 		return;
 	}
-	se_nacl = se_sess->se_node_acl;
-	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
 
 	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
@@ -1680,7 +1676,6 @@
 			(struct tcm_qla2xxx_lport *)target_lport_ptr;
 	struct tcm_qla2xxx_lport *base_lport =
 			(struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
-	struct tcm_qla2xxx_tpg *base_tpg;
 	struct fc_vport_identifiers vport_id;
 
 	if (!qla_tgt_mode_enabled(base_vha)) {
@@ -1693,7 +1688,6 @@
 		pr_err("qla2xxx base_lport or tpg_1 not available\n");
 		return -EPERM;
 	}
-	base_tpg = base_lport->tpg_1;
 
 	memset(&vport_id, 0, sizeof(vport_id));
 	vport_id.port_name = npiv_wwpn;
@@ -1810,6 +1804,11 @@
 	.module				= THIS_MODULE,
 	.name				= "qla2xxx",
 	.node_acl_size			= sizeof(struct tcm_qla2xxx_nacl),
+	/*
+	 * XXX: Limit assumes single page per scatter-gather-list entry.
+	 * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096
+	 */
+	.max_data_sg_nents		= 1200,
 	.get_fabric_name		= tcm_qla2xxx_get_fabric_name,
 	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
 	.tpg_get_tag			= tcm_qla2xxx_get_tag,
@@ -1958,7 +1957,7 @@
 	tcm_qla2xxx_deregister_configfs();
 }
 
-MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
+MODULE_DESCRIPTION("TCM QLA24XX+ series NPIV enabled fabric driver");
 MODULE_LICENSE("GPL");
 module_init(tcm_qla2xxx_init);
 module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index 2ff0922..c126966 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -5,6 +5,8 @@
 #include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/errno.h>
+#include <asm/unaligned.h>
 #include <scsi/scsi_common.h>
 
 /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
@@ -176,3 +178,110 @@
 	return true;
 }
 EXPORT_SYMBOL(scsi_normalize_sense);
+
+/**
+ * scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format.
+ * @sense_buffer:	byte array of descriptor format sense data
+ * @sb_len:		number of valid bytes in sense_buffer
+ * @desc_type:		value of descriptor type to find
+ *			(e.g. 0 -> information)
+ *
+ * Notes:
+ *	only valid when sense data is in descriptor format
+ *
+ * Return value:
+ *	pointer to start of (first) descriptor if found else NULL
+ */
+const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
+				int desc_type)
+{
+	int add_sen_len, add_len, desc_len, k;
+	const u8 * descp;
+
+	if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
+		return NULL;
+	if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
+		return NULL;
+	add_sen_len = (add_sen_len < (sb_len - 8)) ?
+			add_sen_len : (sb_len - 8);
+	descp = &sense_buffer[8];
+	for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
+		descp += desc_len;
+		add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
+		desc_len = add_len + 2;
+		if (descp[0] == desc_type)
+			return descp;
+		if (add_len < 0) // short descriptor ??
+			break;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(scsi_sense_desc_find);
+
+/**
+ * scsi_build_sense_buffer - build sense data in a buffer
+ * @desc:	Sense format (non zero == descriptor format,
+ *              0 == fixed format)
+ * @buf:	Where to build sense data
+ * @key:	Sense key
+ * @asc:	Additional sense code
+ * @ascq:	Additional sense code qualifier
+ *
+ **/
+void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
+{
+	if (desc) {
+		buf[0] = 0x72;	/* descriptor, current */
+		buf[1] = key;
+		buf[2] = asc;
+		buf[3] = ascq;
+		buf[7] = 0;
+	} else {
+		buf[0] = 0x70;	/* fixed, current */
+		buf[2] = key;
+		buf[7] = 0xa;
+		buf[12] = asc;
+		buf[13] = ascq;
+	}
+}
+EXPORT_SYMBOL(scsi_build_sense_buffer);
+
+/**
+ * scsi_set_sense_information - set the information field in a
+ *		formatted sense data buffer
+ * @buf:	Where to build sense data
+ * @buf_len:    buffer length
+ * @info:	64-bit information value to be set
+ *
+ * Return value:
+ *	0 on success or -EINVAL for invalid sense buffer length
+ **/
+int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
+{
+	if ((buf[0] & 0x7f) == 0x72) {
+		u8 *ucp, len;
+
+		len = buf[7];
+		ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
+		if (!ucp) {
+			buf[7] = len + 0xc;
+			ucp = buf + 8 + len;
+		}
+
+		if (buf_len < len + 0xc)
+			/* Not enough room for info */
+			return -EINVAL;
+
+		ucp[0] = 0;
+		ucp[1] = 0xa;
+		ucp[2] = 0x80; /* Valid bit */
+		ucp[3] = 0;
+		put_unaligned_be64(info, &ucp[4]);
+	} else if ((buf[0] & 0x7f) == 0x70) {
+		buf[0] |= 0x80;
+		put_unaligned_be64(info, &buf[3]);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(scsi_set_sense_information);
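Together, the two helpers above let a caller emit descriptor-format sense data that carries the failing LBA. A minimal usage sketch (the wrapper and its ASC/ASCQ values are illustrative, not taken from this patch):

/* Illustrative only: descriptor-format MEDIUM ERROR sense with an LBA. */
static void fill_read_error_sense(u8 *sense, int len, u64 bad_lba)
{
	memset(sense, 0, len);
	/* desc != 0 selects descriptor format (response code 0x72). */
	scsi_build_sense_buffer(1, sense, MEDIUM_ERROR, 0x11, 0x00);
	/* Attach bad_lba as the information field; -EINVAL means 'len'
	 * cannot hold the 12-byte information descriptor. */
	if (scsi_set_sense_information(sense, len, bad_lba))
		pr_warn("sense buffer too small for information field\n");
}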
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 30268bb..dfcc45b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -25,6 +25,9 @@
  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
  */
 
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
 #include <linux/module.h>
 
 #include <linux/kernel.h>
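With this pr_fmt() definition every pr_*() call in the file is automatically prefixed with the module name and the calling function, which is why the later hunks can drop their explicit "%s"/__func__ arguments. Roughly:

pr_err("wild qa_indx=%d\n", qa_indx);
/* expands to approximately
 *   printk(KERN_ERR KBUILD_MODNAME ":%s: " "wild qa_indx=%d\n",
 *          __func__, qa_indx);
 * and prints e.g. "scsi_debug:<function>: wild qa_indx=-1".
 */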
@@ -201,7 +204,6 @@
 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
  * or "peripheral device" addressing (value 0) */
 #define SAM2_LUN_ADDRESS_METHOD 0
-#define SAM2_WLUN_REPORT_LUNS 0xc101
 
 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
  * (for response) at one time. Can be reduced by max_queue option. Command
@@ -698,7 +700,7 @@
 		else
 			hpnt->max_id = scsi_debug_num_tgts;
 		/* scsi_debug_max_luns; */
-		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
+		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
 	}
 	spin_unlock(&sdebug_host_list_lock);
 }
@@ -1288,7 +1290,7 @@
 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
 	if (! arr)
 		return DID_REQUEUE << 16;
-	have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
+	have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS);
 	if (have_wlun)
 		pq_pdt = 0x1e;	/* present, wlun */
 	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
@@ -1427,12 +1429,11 @@
 	unsigned char * sbuff;
 	unsigned char *cmd = scp->cmnd;
 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
-	bool dsense, want_dsense;
+	bool dsense;
 	int len = 18;
 
 	memset(arr, 0, sizeof(arr));
 	dsense = !!(cmd[1] & 1);
-	want_dsense = dsense || scsi_debug_dsense;
 	sbuff = scp->sense_buffer;
 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
 		if (dsense) {
@@ -2446,8 +2447,7 @@
 	__be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
 
 	if (sdt->guard_tag != csum) {
-		pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
-			__func__,
+		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
 			(unsigned long)sector,
 			be16_to_cpu(sdt->guard_tag),
 			be16_to_cpu(csum));
@@ -2455,14 +2455,14 @@
 	}
 	if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
-		pr_err("%s: REF check failed on sector %lu\n",
-			__func__, (unsigned long)sector);
+		pr_err("REF check failed on sector %lu\n",
+			(unsigned long)sector);
 		return 0x03;
 	}
 	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
-		pr_err("%s: REF check failed on sector %lu\n",
-			__func__, (unsigned long)sector);
+		pr_err("REF check failed on sector %lu\n",
+			(unsigned long)sector);
 		return 0x03;
 	}
 	return 0;
@@ -2680,7 +2680,7 @@
 	return 0;
 }
 
-void dump_sector(unsigned char *buf, int len)
+static void dump_sector(unsigned char *buf, int len)
 {
 	int i, j, n;
 
@@ -3365,8 +3365,8 @@
 		one_lun[i].scsi_lun[1] = lun & 0xff;
 	}
 	if (want_wlun) {
-		one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
-		one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
+		one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff;
+		one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff;
 		i++;
 	}
 	alloc_len = (unsigned char *)(one_lun + i) - arr;
@@ -3449,7 +3449,7 @@
 	atomic_inc(&sdebug_completions);
 	qa_indx = indx;
 	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
-		pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
+		pr_err("wild qa_indx=%d\n", qa_indx);
 		return;
 	}
 	spin_lock_irqsave(&queued_arr_lock, iflags);
@@ -3457,21 +3457,21 @@
 	scp = sqcp->a_cmnd;
 	if (NULL == scp) {
 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("%s: scp is NULL\n", __func__);
+		pr_err("scp is NULL\n");
 		return;
 	}
 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
 	if (devip)
 		atomic_dec(&devip->num_in_q);
 	else
-		pr_err("%s: devip=NULL\n", __func__);
+		pr_err("devip=NULL\n");
 	if (atomic_read(&retired_max_queue) > 0)
 		retiring = 1;
 
 	sqcp->a_cmnd = NULL;
 	if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("%s: Unexpected completion\n", __func__);
+		pr_err("Unexpected completion\n");
 		return;
 	}
 
@@ -3481,7 +3481,7 @@
 		retval = atomic_read(&retired_max_queue);
 		if (qa_indx >= retval) {
 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
-			pr_err("%s: index %d too large\n", __func__, retval);
+			pr_err("index %d too large\n", retval);
 			return;
 		}
 		k = find_last_bit(queued_in_use_bm, retval);
@@ -3509,7 +3509,7 @@
 	atomic_inc(&sdebug_completions);
 	qa_indx = sd_hrtp->qa_indx;
 	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
-		pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
+		pr_err("wild qa_indx=%d\n", qa_indx);
 		goto the_end;
 	}
 	spin_lock_irqsave(&queued_arr_lock, iflags);
@@ -3517,21 +3517,21 @@
 	scp = sqcp->a_cmnd;
 	if (NULL == scp) {
 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("%s: scp is NULL\n", __func__);
+		pr_err("scp is NULL\n");
 		goto the_end;
 	}
 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
 	if (devip)
 		atomic_dec(&devip->num_in_q);
 	else
-		pr_err("%s: devip=NULL\n", __func__);
+		pr_err("devip=NULL\n");
 	if (atomic_read(&retired_max_queue) > 0)
 		retiring = 1;
 
 	sqcp->a_cmnd = NULL;
 	if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("%s: Unexpected completion\n", __func__);
+		pr_err("Unexpected completion\n");
 		goto the_end;
 	}
 
@@ -3541,7 +3541,7 @@
 		retval = atomic_read(&retired_max_queue);
 		if (qa_indx >= retval) {
 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
-			pr_err("%s: index %d too large\n", __func__, retval);
+			pr_err("index %d too large\n", retval);
 			goto the_end;
 		}
 		k = find_last_bit(queued_in_use_bm, retval);
@@ -3580,7 +3580,7 @@
 		return devip;
 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
 	if (!sdbg_host) {
-		pr_err("%s: Host info NULL\n", __func__);
+		pr_err("Host info NULL\n");
 		return NULL;
         }
 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
@@ -3596,8 +3596,7 @@
 	if (!open_devip) { /* try and make a new one */
 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
 		if (!open_devip) {
-			printk(KERN_ERR "%s: out of memory at line %d\n",
-				__func__, __LINE__);
+			pr_err("out of memory at line %d\n", __LINE__);
 			return NULL;
 		}
 	}
@@ -3615,7 +3614,7 @@
 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
 {
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
-		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
+		pr_info("slave_alloc <%u %u %u %llu>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
 	return 0;
@@ -3626,7 +3625,7 @@
 	struct sdebug_dev_info *devip;
 
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
-		printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
+		pr_info("slave_configure <%u %u %u %llu>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
 	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
 		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
@@ -3646,7 +3645,7 @@
 		(struct sdebug_dev_info *)sdp->hostdata;
 
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
-		printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
+		pr_info("slave_destroy <%u %u %u %llu>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
 	if (devip) {
 		/* make this slot available for re-use */
@@ -3897,8 +3896,7 @@
 		return;
 	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
 		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
-		pr_warn("%s: reducing partitions to %d\n", __func__,
-			SDEBUG_MAX_PARTS);
+		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
 	}
 	num_sectors = (int)sdebug_store_sectors;
 	sectors_per_part = (num_sectors - sdebug_sectors_per)
@@ -3942,14 +3940,20 @@
 	unsigned long iflags;
 	int k, num_in_q, qdepth, inject;
 	struct sdebug_queued_cmd *sqcp = NULL;
-	struct scsi_device *sdp = cmnd->device;
+	struct scsi_device *sdp;
 
-	if (NULL == cmnd || NULL == devip) {
-		pr_warn("%s: called with NULL cmnd or devip pointer\n",
-			__func__);
+	/* this should never happen */
+	if (WARN_ON(!cmnd))
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	if (NULL == devip) {
+		pr_warn("called devip == NULL\n");
 		/* no particularly good error to report back */
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
+
+	sdp = cmnd->device;
+
 	if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
 			    __func__, scsi_result);
@@ -4383,8 +4387,7 @@
 
 				fake_storep = vmalloc(sz);
 				if (NULL == fake_storep) {
-					pr_err("%s: out of memory, 9\n",
-					       __func__);
+					pr_err("out of memory, 9\n");
 					return -ENOMEM;
 				}
 				memset(fake_storep, 0, sz);
@@ -4784,8 +4787,7 @@
 	atomic_set(&retired_max_queue, 0);
 
 	if (scsi_debug_ndelay >= 1000000000) {
-		pr_warn("%s: ndelay must be less than 1 second, ignored\n",
-			__func__);
+		pr_warn("ndelay must be less than 1 second, ignored\n");
 		scsi_debug_ndelay = 0;
 	} else if (scsi_debug_ndelay > 0)
 		scsi_debug_delay = DELAY_OVERRIDDEN;
@@ -4797,8 +4799,7 @@
 	case 4096:
 		break;
 	default:
-		pr_err("%s: invalid sector_size %d\n", __func__,
-		       scsi_debug_sector_size);
+		pr_err("invalid sector_size %d\n", scsi_debug_sector_size);
 		return -EINVAL;
 	}
 
@@ -4811,29 +4812,28 @@
 		break;
 
 	default:
-		pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
+		pr_err("dif must be 0, 1, 2 or 3\n");
 		return -EINVAL;
 	}
 
 	if (scsi_debug_guard > 1) {
-		pr_err("%s: guard must be 0 or 1\n", __func__);
+		pr_err("guard must be 0 or 1\n");
 		return -EINVAL;
 	}
 
 	if (scsi_debug_ato > 1) {
-		pr_err("%s: ato must be 0 or 1\n", __func__);
+		pr_err("ato must be 0 or 1\n");
 		return -EINVAL;
 	}
 
 	if (scsi_debug_physblk_exp > 15) {
-		pr_err("%s: invalid physblk_exp %u\n", __func__,
-		       scsi_debug_physblk_exp);
+		pr_err("invalid physblk_exp %u\n", scsi_debug_physblk_exp);
 		return -EINVAL;
 	}
 
 	if (scsi_debug_lowest_aligned > 0x3fff) {
-		pr_err("%s: lowest_aligned too big: %u\n", __func__,
-		       scsi_debug_lowest_aligned);
+		pr_err("lowest_aligned too big: %u\n",
+			scsi_debug_lowest_aligned);
 		return -EINVAL;
 	}
 
@@ -4863,7 +4863,7 @@
 	if (0 == scsi_debug_fake_rw) {
 		fake_storep = vmalloc(sz);
 		if (NULL == fake_storep) {
-			pr_err("%s: out of memory, 1\n", __func__);
+			pr_err("out of memory, 1\n");
 			return -ENOMEM;
 		}
 		memset(fake_storep, 0, sz);
@@ -4877,11 +4877,10 @@
 		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
 		dif_storep = vmalloc(dif_size);
 
-		pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
-			dif_storep);
+		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
 
 		if (dif_storep == NULL) {
-			pr_err("%s: out of mem. (DIX)\n", __func__);
+			pr_err("out of mem. (DIX)\n");
 			ret = -ENOMEM;
 			goto free_vm;
 		}
@@ -4903,18 +4902,17 @@
 		if (scsi_debug_unmap_alignment &&
 		    scsi_debug_unmap_granularity <=
 		    scsi_debug_unmap_alignment) {
-			pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
-			       __func__);
+			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
 			return -EINVAL;
 		}
 
 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
 		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
 
-		pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
+		pr_info("%lu provisioning blocks\n", map_size);
 
 		if (map_storep == NULL) {
-			pr_err("%s: out of mem. (MAP)\n", __func__);
+			pr_err("out of mem. (MAP)\n");
 			ret = -ENOMEM;
 			goto free_vm;
 		}
@@ -4928,18 +4926,18 @@
 
 	pseudo_primary = root_device_register("pseudo_0");
 	if (IS_ERR(pseudo_primary)) {
-		pr_warn("%s: root_device_register() error\n", __func__);
+		pr_warn("root_device_register() error\n");
 		ret = PTR_ERR(pseudo_primary);
 		goto free_vm;
 	}
 	ret = bus_register(&pseudo_lld_bus);
 	if (ret < 0) {
-		pr_warn("%s: bus_register error: %d\n", __func__, ret);
+		pr_warn("bus_register error: %d\n", ret);
 		goto dev_unreg;
 	}
 	ret = driver_register(&sdebug_driverfs_driver);
 	if (ret < 0) {
-		pr_warn("%s: driver_register error: %d\n", __func__, ret);
+		pr_warn("driver_register error: %d\n", ret);
 		goto bus_unreg;
 	}
 
@@ -4948,16 +4946,14 @@
 
         for (k = 0; k < host_to_add; k++) {
                 if (sdebug_add_adapter()) {
-			pr_err("%s: sdebug_add_adapter failed k=%d\n",
-				__func__, k);
+			pr_err("sdebug_add_adapter failed k=%d\n", k);
                         break;
                 }
         }
 
-	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
-		pr_info("%s: built %d host(s)\n", __func__,
-			scsi_debug_add_host);
-	}
+	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+		pr_info("built %d host(s)\n", scsi_debug_add_host);
+
 	return 0;
 
 bus_unreg:
@@ -4965,10 +4961,8 @@
 dev_unreg:
 	root_device_unregister(pseudo_primary);
 free_vm:
-	if (map_storep)
-		vfree(map_storep);
-	if (dif_storep)
-		vfree(dif_storep);
+	vfree(map_storep);
+	vfree(dif_storep);
 	vfree(fake_storep);
 
 	return ret;
@@ -4986,9 +4980,7 @@
 	bus_unregister(&pseudo_lld_bus);
 	root_device_unregister(pseudo_primary);
 
-	if (dif_storep)
-		vfree(dif_storep);
-
+	vfree(dif_storep);
 	vfree(fake_storep);
 }
 
@@ -5012,8 +5004,7 @@
 
         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
         if (NULL == sdbg_host) {
-                printk(KERN_ERR "%s: out of memory at line %d\n",
-                       __func__, __LINE__);
+		pr_err("out of memory at line %d\n", __LINE__);
                 return -ENOMEM;
         }
 
@@ -5023,8 +5014,7 @@
         for (k = 0; k < devs_per_host; k++) {
 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
 		if (!sdbg_devinfo) {
-                        printk(KERN_ERR "%s: out of memory at line %d\n",
-                               __func__, __LINE__);
+			pr_err("out of memory at line %d\n", __LINE__);
                         error = -ENOMEM;
 			goto clean;
                 }
@@ -5178,7 +5168,7 @@
 		}
 		sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
 	}
-	has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
+	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
 	if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
 		return schedule_resp(scp, NULL, errsts_no_connect, 0);
 
@@ -5338,7 +5328,7 @@
 		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
 	if (NULL == hpnt) {
-		pr_err("%s: scsi_host_alloc failed\n", __func__);
+		pr_err("scsi_host_alloc failed\n");
 		error = -ENODEV;
 		return error;
 	}
@@ -5349,7 +5339,8 @@
 		hpnt->max_id = scsi_debug_num_tgts + 1;
 	else
 		hpnt->max_id = scsi_debug_num_tgts;
-	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */
+	/* = scsi_debug_max_luns; */
+	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
 
 	host_prot = 0;
 
@@ -5381,7 +5372,7 @@
 
 	scsi_host_set_prot(hpnt, host_prot);
 
-	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
+	pr_info("host protection%s%s%s%s%s%s%s\n",
 	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
 	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
 	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
@@ -5409,7 +5400,7 @@
 
         error = scsi_add_host(hpnt, &sdbg_host->dev);
         if (error) {
-                printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+		pr_err("scsi_add_host failed\n");
                 error = -ENODEV;
 		scsi_host_put(hpnt);
         } else
@@ -5426,8 +5417,7 @@
 	sdbg_host = to_sdebug_host(dev);
 
 	if (!sdbg_host) {
-		printk(KERN_ERR "%s: Unable to locate host info\n",
-		       __func__);
+		pr_err("Unable to locate host info\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
new file mode 100644
index 0000000..edb044a
--- /dev/null
+++ b/drivers/scsi/scsi_dh.c
@@ -0,0 +1,437 @@
+/*
+ * SCSI device handler infrastructure.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2007
+ *      Authors:
+ *               Chandra Seetharaman <sekharan@us.ibm.com>
+ *               Mike Anderson <andmike@linux.vnet.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <scsi/scsi_dh.h>
+#include "scsi_priv.h"
+
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(scsi_dh_list);
+
+struct scsi_dh_blist {
+	const char *vendor;
+	const char *model;
+	const char *driver;
+};
+
+static const struct scsi_dh_blist scsi_dh_blist[] = {
+	{"DGC", "RAID",			"clariion" },
+	{"DGC", "DISK",			"clariion" },
+	{"DGC", "VRAID",		"clariion" },
+
+	{"COMPAQ", "MSA1000 VOLUME",	"hp_sw" },
+	{"COMPAQ", "HSV110",		"hp_sw" },
+	{"HP", "HSV100",		"hp_sw"},
+	{"DEC", "HSG80",		"hp_sw"},
+
+	{"IBM", "1722",			"rdac", },
+	{"IBM", "1724",			"rdac", },
+	{"IBM", "1726",			"rdac", },
+	{"IBM", "1742",			"rdac", },
+	{"IBM", "1745",			"rdac", },
+	{"IBM", "1746",			"rdac", },
+	{"IBM", "1813",			"rdac", },
+	{"IBM", "1814",			"rdac", },
+	{"IBM", "1815",			"rdac", },
+	{"IBM", "1818",			"rdac", },
+	{"IBM", "3526",			"rdac", },
+	{"SGI", "TP9",			"rdac", },
+	{"SGI", "IS",			"rdac", },
+	{"STK", "OPENstorage D280",	"rdac", },
+	{"STK", "FLEXLINE 380",		"rdac", },
+	{"SUN", "CSM",			"rdac", },
+	{"SUN", "LCSM100",		"rdac", },
+	{"SUN", "STK6580_6780",		"rdac", },
+	{"SUN", "SUN_6180",		"rdac", },
+	{"SUN", "ArrayStorage",		"rdac", },
+	{"DELL", "MD3",			"rdac", },
+	{"NETAPP", "INF-01-00",		"rdac", },
+	{"LSI", "INF-01-00",		"rdac", },
+	{"ENGENIO", "INF-01-00",	"rdac", },
+	{NULL, NULL,			NULL },
+};
+
+static const char *
+scsi_dh_find_driver(struct scsi_device *sdev)
+{
+	const struct scsi_dh_blist *b;
+
+	if (scsi_device_tpgs(sdev))
+		return "alua";
+
+	for (b = scsi_dh_blist; b->vendor; b++) {
+		if (!strncmp(sdev->vendor, b->vendor, strlen(b->vendor)) &&
+		    !strncmp(sdev->model, b->model, strlen(b->model))) {
+			return b->driver;
+		}
+	}
+	return NULL;
+}
+
+
+static struct scsi_device_handler *__scsi_dh_lookup(const char *name)
+{
+	struct scsi_device_handler *tmp, *found = NULL;
+
+	spin_lock(&list_lock);
+	list_for_each_entry(tmp, &scsi_dh_list, list) {
+		if (!strncmp(tmp->name, name, strlen(tmp->name))) {
+			found = tmp;
+			break;
+		}
+	}
+	spin_unlock(&list_lock);
+	return found;
+}
+
+static struct scsi_device_handler *scsi_dh_lookup(const char *name)
+{
+	struct scsi_device_handler *dh;
+
+	dh = __scsi_dh_lookup(name);
+	if (!dh) {
+		request_module(name);
+		dh = __scsi_dh_lookup(name);
+	}
+
+	return dh;
+}
+
+/*
+ * scsi_dh_handler_attach - Attach a device handler to a device
+ * @sdev - SCSI device the device handler should attach to
+ * @scsi_dh - The device handler to attach
+ */
+static int scsi_dh_handler_attach(struct scsi_device *sdev,
+				  struct scsi_device_handler *scsi_dh)
+{
+	int error;
+
+	if (!try_module_get(scsi_dh->module))
+		return -EINVAL;
+
+	error = scsi_dh->attach(sdev);
+	if (error) {
+		sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%d)\n",
+			    scsi_dh->name, error);
+		module_put(scsi_dh->module);
+	} else
+		sdev->handler = scsi_dh;
+
+	return error;
+}
+
+/*
+ * scsi_dh_handler_detach - Detach a device handler from a device
+ * @sdev - SCSI device the device handler should be detached from
+ */
+static void scsi_dh_handler_detach(struct scsi_device *sdev)
+{
+	sdev->handler->detach(sdev);
+	sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", sdev->handler->name);
+	module_put(sdev->handler->module);
+}
+
+/*
+ * Functions for sysfs attribute 'dh_state'
+ */
+static ssize_t
+store_dh_state(struct device *dev, struct device_attribute *attr,
+	       const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct scsi_device_handler *scsi_dh;
+	int err = -EINVAL;
+
+	if (sdev->sdev_state == SDEV_CANCEL ||
+	    sdev->sdev_state == SDEV_DEL)
+		return -ENODEV;
+
+	if (!sdev->handler) {
+		/*
+		 * Attach to a device handler
+		 */
+		scsi_dh = scsi_dh_lookup(buf);
+		if (!scsi_dh)
+			return err;
+		err = scsi_dh_handler_attach(sdev, scsi_dh);
+	} else {
+		if (!strncmp(buf, "detach", 6)) {
+			/*
+			 * Detach from a device handler
+			 */
+			sdev_printk(KERN_WARNING, sdev,
+				    "can't detach handler %s.\n",
+				    sdev->handler->name);
+			err = -EINVAL;
+		} else if (!strncmp(buf, "activate", 8)) {
+			/*
+			 * Activate a device handler
+			 */
+			if (sdev->handler->activate)
+				err = sdev->handler->activate(sdev, NULL, NULL);
+			else
+				err = 0;
+		}
+	}
+
+	return err < 0 ? err : count;
+}
+
+static ssize_t
+show_dh_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	if (!sdev->handler)
+		return snprintf(buf, 20, "detached\n");
+
+	return snprintf(buf, 20, "%s\n", sdev->handler->name);
+}
+
+static struct device_attribute scsi_dh_state_attr =
+	__ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state,
+	       store_dh_state);
+
+int scsi_dh_add_device(struct scsi_device *sdev)
+{
+	struct scsi_device_handler *devinfo = NULL;
+	const char *drv;
+	int err;
+
+	err = device_create_file(&sdev->sdev_gendev, &scsi_dh_state_attr);
+	if (err)
+		return err;
+
+	drv = scsi_dh_find_driver(sdev);
+	if (drv)
+		devinfo = scsi_dh_lookup(drv);
+	if (devinfo)
+		err = scsi_dh_handler_attach(sdev, devinfo);
+	return err;
+}
+
+void scsi_dh_remove_device(struct scsi_device *sdev)
+{
+	if (sdev->handler)
+		scsi_dh_handler_detach(sdev);
+	device_remove_file(&sdev->sdev_gendev, &scsi_dh_state_attr);
+}
+
+/*
+ * scsi_register_device_handler - register a device handler personality
+ *      module.
+ * @scsi_dh - device handler to be registered.
+ *
+ * Returns 0 on success, -EBUSY if handler already registered.
+ */
+int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
+{
+	if (__scsi_dh_lookup(scsi_dh->name))
+		return -EBUSY;
+
+	if (!scsi_dh->attach || !scsi_dh->detach)
+		return -EINVAL;
+
+	spin_lock(&list_lock);
+	list_add(&scsi_dh->list, &scsi_dh_list);
+	spin_unlock(&list_lock);
+
+	printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
+
+	return SCSI_DH_OK;
+}
+EXPORT_SYMBOL_GPL(scsi_register_device_handler);
+
+/*
+ * scsi_unregister_device_handler - unregister a device handler personality
+ *      module.
+ * @scsi_dh - device handler to be unregistered.
+ *
+ * Returns 0 on success, -ENODEV if handler not registered.
+ */
+int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
+{
+	if (!__scsi_dh_lookup(scsi_dh->name))
+		return -ENODEV;
+
+	spin_lock(&list_lock);
+	list_del(&scsi_dh->list);
+	spin_unlock(&list_lock);
+	printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
+
+	return SCSI_DH_OK;
+}
+EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
+
+static struct scsi_device *get_sdev_from_queue(struct request_queue *q)
+{
+	struct scsi_device *sdev;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	sdev = q->queuedata;
+	if (!sdev || !get_device(&sdev->sdev_gendev))
+		sdev = NULL;
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	return sdev;
+}
+
+/*
+ * scsi_dh_activate - activate the path associated with the scsi_device
+ *      corresponding to the given request queue.
+ *     Returns immediately without waiting for activation to be completed.
+ * @q    - Request queue that is associated with the scsi_device to be
+ *         activated.
+ * @fn   - Function to be called upon completion of the activation.
+ *         Function fn is called with data (below) and the error code.
+ *         Function fn may be called from the same calling context. So,
+ *         do not hold the lock in the caller which may be needed in fn.
+ * @data - data passed to the function fn upon completion.
+ *
+ */
+int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
+{
+	struct scsi_device *sdev;
+	int err = SCSI_DH_NOSYS;
+
+	sdev = get_sdev_from_queue(q);
+	if (!sdev) {
+		if (fn)
+			fn(data, err);
+		return err;
+	}
+
+	if (!sdev->handler)
+		goto out_fn;
+	err = SCSI_DH_NOTCONN;
+	if (sdev->sdev_state == SDEV_CANCEL ||
+	    sdev->sdev_state == SDEV_DEL)
+		goto out_fn;
+
+	err = SCSI_DH_DEV_OFFLINED;
+	if (sdev->sdev_state == SDEV_OFFLINE)
+		goto out_fn;
+
+	if (sdev->handler->activate)
+		err = sdev->handler->activate(sdev, fn, data);
+
+out_put_device:
+	put_device(&sdev->sdev_gendev);
+	return err;
+
+out_fn:
+	if (fn)
+		fn(data, err);
+	goto out_put_device;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_activate);
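A minimal caller sketch (the struct and callback are hypothetical, not part of this patch) showing the asynchronous contract described above, namely that the completion callback may run before scsi_dh_activate() returns:

/* Hypothetical caller; not part of this patch. */
struct my_path {
	struct block_device *bdev;
	struct completion activated;
};

static void my_activate_done(void *data, int scsi_dh_err)
{
	struct my_path *path = data;

	if (scsi_dh_err != SCSI_DH_OK)
		pr_warn("path activation failed: %d\n", scsi_dh_err);
	complete(&path->activated);	/* may be reached from the caller's own context */
}

/* Usage: scsi_dh_activate(bdev_get_queue(path->bdev), my_activate_done, path);
 * then wait_for_completion(&path->activated) once it is safe to sleep;
 * the caller must not hold locks that my_activate_done() needs. */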
+
+/*
+ * scsi_dh_set_params - set the parameters for the device as per the
+ *      string specified in params.
+ * @q - Request queue that is associated with the scsi_device for
+ *      which the parameters to be set.
+ * @params - parameters in the following format
+ *      "no_of_params\0param1\0param2\0param3\0...\0"
+ *      for example, string for 2 parameters with value 10 and 21
+ *      is specified as "2\010\021\0".
+ */
+int scsi_dh_set_params(struct request_queue *q, const char *params)
+{
+	struct scsi_device *sdev;
+	int err = -SCSI_DH_NOSYS;
+
+	sdev = get_sdev_from_queue(q);
+	if (!sdev)
+		return err;
+
+	if (sdev->handler && sdev->handler->set_params)
+		err = sdev->handler->set_params(sdev, params);
+	put_device(&sdev->sdev_gendev);
+	return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_set_params);
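One caller-side pitfall with the format above: in a single C string literal a backslash-zero followed by a digit is parsed as one octal escape, so the documented example is safest written with adjacent literals. A hedged sketch (values are illustrative):

/* Illustrative only: two parameters with values 10 and 21. */
static const char example_params[] = "2\0" "10\0" "21";
/* bytes: '2', 0, '1', '0', 0, '2', '1', 0 (final NUL added by the compiler) */
/* err = scsi_dh_set_params(q, example_params); */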
+
+/*
+ * scsi_dh_attach - Attach device handler
+ * @q - Request queue that is associated with the scsi_device
+ *      the handler should be attached to
+ * @name - name of the handler to attach
+ */
+int scsi_dh_attach(struct request_queue *q, const char *name)
+{
+	struct scsi_device *sdev;
+	struct scsi_device_handler *scsi_dh;
+	int err = 0;
+
+	sdev = get_sdev_from_queue(q);
+	if (!sdev)
+		return -ENODEV;
+
+	scsi_dh = scsi_dh_lookup(name);
+	if (!scsi_dh) {
+		err = -EINVAL;
+		goto out_put_device;
+	}
+
+	if (sdev->handler) {
+		if (sdev->handler != scsi_dh)
+			err = -EBUSY;
+		goto out_put_device;
+	}
+
+	err = scsi_dh_handler_attach(sdev, scsi_dh);
+
+out_put_device:
+	put_device(&sdev->sdev_gendev);
+	return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_attach);
+
+/*
+ * scsi_dh_attached_handler_name - Get attached device handler's name
+ * @q - Request queue that is associated with the scsi_device
+ *      that may have a device handler attached
+ * @gfp - the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Returns name of attached handler, NULL if no handler is attached.
+ * Caller must take care to free the returned string.
+ */
+const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
+{
+	struct scsi_device *sdev;
+	const char *handler_name = NULL;
+
+	sdev = get_sdev_from_queue(q);
+	if (!sdev)
+		return NULL;
+
+	if (sdev->handler)
+		handler_name = kstrdup(sdev->handler->name, gfp);
+	put_device(&sdev->sdev_gendev);
+	return handler_name;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name);
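A minimal sketch (hypothetical caller, not part of this patch) of how the attach and name-lookup entry points combine:

/* Hypothetical caller; not part of this patch. */
static int ensure_alua_attached(struct request_queue *q)
{
	const char *name;
	int err = 0;

	name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
	if (!name)
		err = scsi_dh_attach(q, "alua");	/* nothing attached yet */
	else if (strcmp(name, "alua"))
		err = -EBUSY;				/* another handler owns the device */

	kfree(name);					/* name was kstrdup()'d by the helper */
	return err;
}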
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index afd34a6..66a96cd 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -33,9 +33,11 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_driver.h>
 #include <scsi/scsi_eh.h>
+#include <scsi/scsi_common.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_dh.h>
 #include <scsi/sg.h>
 
 #include "scsi_priv.h"
@@ -463,11 +465,10 @@
 	if (scsi_sense_is_deferred(&sshdr))
 		return NEEDS_RETRY;
 
-	if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
-			sdev->scsi_dh_data->scsi_dh->check_sense) {
+	if (sdev->handler && sdev->handler->check_sense) {
 		int rc;
 
-		rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
+		rc = sdev->handler->check_sense(sdev, &sshdr);
 		if (rc != SCSI_RETURN_NOT_HANDLED)
 			return rc;
 		/* handler does not care. Drop down to default handling */
@@ -2178,8 +2179,17 @@
 	 * We never actually get interrupted because kthread_run
 	 * disables signal delivery for the created thread.
 	 */
-	while (!kthread_should_stop()) {
+	while (true) {
+		/*
+		 * The sequence in kthread_stop() sets the stop flag first,
+		 * then wakes the process.  To avoid missed wakeups, the task
+		 * should always be in a non-running state before the stop
+		 * flag is checked.
+		 */
 		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_stop())
+			break;
+
 		if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
 		    shost->host_failed != atomic_read(&shost->host_busy)) {
 			SCSI_LOG_ERROR_RECOVERY(1,
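The rationale in the comment above is the standard kthread shutdown pattern:
declare the sleep state before testing the stop flag, so the wakeup issued by
kthread_stop() cannot fall between the test and the sleep. A generic sketch of
that pattern (have_work()/process_work() are placeholders, not functions in
this driver):

	while (true) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		if (!have_work()) {
			schedule();		/* sleep until woken */
			continue;
		}
		__set_current_state(TASK_RUNNING);
		process_work();
	}
	__set_current_state(TASK_RUNNING);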
@@ -2416,45 +2426,6 @@
 EXPORT_SYMBOL(scsi_command_normalize_sense);
 
 /**
- * scsi_sense_desc_find - search for a given descriptor type in	descriptor sense data format.
- * @sense_buffer:	byte array of descriptor format sense data
- * @sb_len:		number of valid bytes in sense_buffer
- * @desc_type:		value of descriptor type to find
- *			(e.g. 0 -> information)
- *
- * Notes:
- *	only valid when sense data is in descriptor format
- *
- * Return value:
- *	pointer to start of (first) descriptor if found else NULL
- */
-const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
-				int desc_type)
-{
-	int add_sen_len, add_len, desc_len, k;
-	const u8 * descp;
-
-	if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
-		return NULL;
-	if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
-		return NULL;
-	add_sen_len = (add_sen_len < (sb_len - 8)) ?
-			add_sen_len : (sb_len - 8);
-	descp = &sense_buffer[8];
-	for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
-		descp += desc_len;
-		add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
-		desc_len = add_len + 2;
-		if (descp[0] == desc_type)
-			return descp;
-		if (add_len < 0) // short descriptor ??
-			break;
-	}
-	return NULL;
-}
-EXPORT_SYMBOL(scsi_sense_desc_find);
-
-/**
  * scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format)
  * @sense_buffer:	byte array of sense data
  * @sb_len:		number of valid bytes in sense_buffer
@@ -2503,31 +2474,3 @@
 	}
 }
 EXPORT_SYMBOL(scsi_get_sense_info_fld);
-
-/**
- * scsi_build_sense_buffer - build sense data in a buffer
- * @desc:	Sense format (non zero == descriptor format,
- * 		0 == fixed format)
- * @buf:	Where to build sense data
- * @key:	Sense key
- * @asc:	Additional sense code
- * @ascq:	Additional sense code qualifier
- *
- **/
-void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
-{
-	if (desc) {
-		buf[0] = 0x72;	/* descriptor, current */
-		buf[1] = key;
-		buf[2] = asc;
-		buf[3] = ascq;
-		buf[7] = 0;
-	} else {
-		buf[0] = 0x70;	/* fixed, current */
-		buf[2] = key;
-		buf[7] = 0xa;
-		buf[12] = asc;
-		buf[13] = ascq;
-	}
-}
-EXPORT_SYMBOL(scsi_build_sense_buffer);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 882864f..cbfc599 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -31,6 +31,7 @@
 #include <scsi/scsi_driver.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_dh.h>
 
 #include <trace/events/scsi.h>
 
@@ -1248,9 +1249,8 @@
 {
 	struct scsi_cmnd *cmd = req->special;
 
-	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
-			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
-		int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+	if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
+		int ret = sdev->handler->prep_fn(sdev, req);
 		if (ret != BLKPREP_OK)
 			return ret;
 	}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index e3902fc..644bb73 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -170,6 +170,15 @@
 extern struct async_domain scsi_sd_pm_domain;
 extern struct async_domain scsi_sd_probe_domain;
 
+/* scsi_dh.c */
+#ifdef CONFIG_SCSI_DH
+int scsi_dh_add_device(struct scsi_device *sdev);
+void scsi_dh_remove_device(struct scsi_device *sdev);
+#else
+static inline int scsi_dh_add_device(struct scsi_device *sdev) { return 0; }
+static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
+#endif
+
 /* 
  * internal scsi timeout functions: for use by mid-layer and transport
  * classes.
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 9ad41168..b333389 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1030,11 +1030,20 @@
 				"failed to add device: %d\n", error);
 		return error;
 	}
+
+	error = scsi_dh_add_device(sdev);
+	if (error) {
+		sdev_printk(KERN_INFO, sdev,
+				"failed to add device handler: %d\n", error);
+		return error;
+	}
+
 	device_enable_async_suspend(&sdev->sdev_dev);
 	error = device_add(&sdev->sdev_dev);
 	if (error) {
 		sdev_printk(KERN_INFO, sdev,
 				"failed to add class device: %d\n", error);
+		scsi_dh_remove_device(sdev);
 		device_del(&sdev->sdev_gendev);
 		return error;
 	}
@@ -1074,6 +1083,7 @@
 		bsg_unregister_queue(sdev->request_queue);
 		device_unregister(&sdev->sdev_dev);
 		transport_remove_device(dev);
+		scsi_dh_remove_device(sdev);
 		device_del(dev);
 	} else
 		put_device(&sdev->sdev_dev);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 9a05819..30d26e3 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1222,13 +1222,6 @@
 	u64 identifier;
 	int error;
 
-	/*
-	 * Only devices behind an expander are supported, because the
-	 * enclosure identifier is a SMP feature.
-	 */
-	if (scsi_is_sas_phy_local(phy))
-		return -EINVAL;
-
 	error = i->f->get_enclosure_identifier(rphy, &identifier);
 	if (error)
 		return error;
@@ -1248,9 +1241,6 @@
 	struct sas_internal *i = to_sas_internal(shost->transportt);
 	int val;
 
-	if (scsi_is_sas_phy_local(phy))
-		return -EINVAL;
-
 	val = i->f->get_bay_identifier(rphy);
 	if (val < 0)
 		return val;
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 043419d..8e72bcb 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -65,7 +65,7 @@
 	raw_spin_unlock_irqrestore(&intc_big_lock, flags);
 }
 
-static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
+static void intc_redirect_irq(struct irq_desc *desc)
 {
 	generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc));
 }
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index 7dff08e..6ce7f0d 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -99,15 +99,7 @@
  */
 static inline void activate_irq(int irq)
 {
-#ifdef CONFIG_ARM
-	/* ARM requires an extra step to clear IRQ_NOREQUEST, which it
-	 * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
-	 */
-	set_irq_flags(irq, IRQF_VALID);
-#else
-	/* same effect on other architectures */
-	irq_set_noprobe(irq);
-#endif
+	irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);
 }
 
 static inline int intc_handle_int_cmp(const void *a, const void *b)
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index bafc51c..e789962 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -109,7 +109,7 @@
 	return 0;
 }
 
-static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc)
+static void intc_virq_handler(struct irq_desc *desc)
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 	struct irq_data *data = irq_desc_get_irq_data(desc);
@@ -127,7 +127,7 @@
 			handle = (unsigned long)irq_desc_get_handler_data(vdesc);
 			addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
 			if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
-				generic_handle_irq_desc(entry->irq, vdesc);
+				generic_handle_irq_desc(vdesc);
 		}
 	}
 
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index d3d1891..25abd4e 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -35,20 +35,11 @@
 static int __init sh_pm_runtime_init(void)
 {
 	if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
-		if (!of_machine_is_compatible("renesas,emev2") &&
-		    !of_machine_is_compatible("renesas,r7s72100") &&
-#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
-		    !of_machine_is_compatible("renesas,r8a73a4") &&
-		    !of_machine_is_compatible("renesas,r8a7740") &&
-		    !of_machine_is_compatible("renesas,sh73a0") &&
-#endif
-		    !of_machine_is_compatible("renesas,r8a7778") &&
-		    !of_machine_is_compatible("renesas,r8a7779") &&
-		    !of_machine_is_compatible("renesas,r8a7790") &&
-		    !of_machine_is_compatible("renesas,r8a7791") &&
-		    !of_machine_is_compatible("renesas,r8a7792") &&
-		    !of_machine_is_compatible("renesas,r8a7793") &&
-		    !of_machine_is_compatible("renesas,r8a7794"))
+		if (!of_find_compatible_node(NULL, NULL,
+					     "renesas,cpg-mstp-clocks"))
+			return 0;
+		if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS_OF) &&
+		    of_find_node_with_property(NULL, "#power-domain-cells"))
 			return 0;
 	}
 
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
index 6792aae..052aecf 100644
--- a/drivers/soc/dove/pmu.c
+++ b/drivers/soc/dove/pmu.c
@@ -222,9 +222,9 @@
 }
 
 /* PMU IRQ controller */
-static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void pmu_irq_handler(struct irq_desc *desc)
 {
-	struct pmu_data *pmu = irq_get_handler_data(irq);
+	struct pmu_data *pmu = irq_desc_get_handler_data(desc);
 	struct irq_chip_generic *gc = pmu->irq_gc;
 	struct irq_domain *domain = pmu->irq_domain;
 	void __iomem *base = gc->reg_base;
@@ -232,7 +232,7 @@
 	u32 done = ~0;
 
 	if (stat == 0) {
-		handle_bad_irq(irq, desc);
+		handle_bad_irq(desc);
 		return;
 	}
 
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index bf9ed38..63318e2 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1720,6 +1720,7 @@
 	return clk_prepare_enable(as->clk);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int atmel_spi_suspend(struct device *dev)
 {
 	struct spi_master *master = dev_get_drvdata(dev);
@@ -1756,6 +1757,7 @@
 
 	return ret;
 }
+#endif
 
 static const struct dev_pm_ops atmel_spi_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index e7874a6..3e8eeb2 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -386,14 +386,14 @@
 	/* otherwise we only allow transfers within the same page
 	 * to avoid wasting time on dma_mapping when it is not practical
 	 */
-	if (((size_t)tfr->tx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
+	if (((size_t)tfr->tx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
 		dev_warn_once(&spi->dev,
 			      "Unaligned spi tx-transfer bridging page\n");
 		return false;
 	}
-	if (((size_t)tfr->rx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
+	if (((size_t)tfr->rx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
 		dev_warn_once(&spi->dev,
-			      "Unaligned spi tx-transfer bridging page\n");
+			      "Unaligned spi rx-transfer bridging page\n");
 		return false;
 	}
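The masking change above matters because PAGE_MASK keeps the page-frame bits
rather than the in-page offset, so the old test fired for nearly every buffer.
A worked example of the corrected check, assuming 4 KiB pages:

	/* buf = 0x1FF0, len = 0x20, PAGE_SIZE = 0x1000:
	 *   offset in page = 0x1FF0 & (PAGE_SIZE - 1) = 0xFF0
	 *   0xFF0 + 0x20   = 0x1010 > 0x1000, so the transfer crosses a page
	 * and the driver falls back to the non-DMA path. */
	static inline bool crosses_page(const void *buf, size_t len)
	{
		return (((size_t)buf & (PAGE_SIZE - 1)) + len) > PAGE_SIZE;
	}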
 
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 5468fc7..2465259 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -444,6 +444,7 @@
 	{ .compatible = "amlogic,meson6-spifc", },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, meson_spifc_dt_match);
 
 static struct platform_driver meson_spifc_driver = {
 	.probe	= meson_spifc_probe,
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 5f6315c..ecb6c58 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -85,7 +85,7 @@
 	void __iomem *base;
 	u32 state;
 	u32 pad_sel;
-	struct clk *spi_clk, *parent_clk;
+	struct clk *parent_clk, *sel_clk, *spi_clk;
 	struct spi_transfer *cur_transfer;
 	u32 xfer_len;
 	struct scatterlist *tx_sgl, *rx_sgl;
@@ -173,22 +173,6 @@
 		writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG);
 }
 
-static int mtk_spi_prepare_hardware(struct spi_master *master)
-{
-	struct spi_transfer *trans;
-	struct mtk_spi *mdata = spi_master_get_devdata(master);
-	struct spi_message *msg = master->cur_msg;
-
-	trans = list_first_entry(&msg->transfers, struct spi_transfer,
-				 transfer_list);
-	if (!trans->cs_change) {
-		mdata->state = MTK_SPI_IDLE;
-		mtk_spi_reset(mdata);
-	}
-
-	return 0;
-}
-
 static int mtk_spi_prepare_message(struct spi_master *master,
 				   struct spi_message *msg)
 {
@@ -228,11 +212,15 @@
 	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
 
 	reg_val = readl(mdata->base + SPI_CMD_REG);
-	if (!enable)
+	if (!enable) {
 		reg_val |= SPI_CMD_PAUSE_EN;
-	else
+		writel(reg_val, mdata->base + SPI_CMD_REG);
+	} else {
 		reg_val &= ~SPI_CMD_PAUSE_EN;
-	writel(reg_val, mdata->base + SPI_CMD_REG);
+		writel(reg_val, mdata->base + SPI_CMD_REG);
+		mdata->state = MTK_SPI_IDLE;
+		mtk_spi_reset(mdata);
+	}
 }
 
 static void mtk_spi_prepare_transfer(struct spi_master *master,
@@ -509,7 +497,6 @@
 	master->mode_bits = SPI_CPOL | SPI_CPHA;
 
 	master->set_cs = mtk_spi_set_cs;
-	master->prepare_transfer_hardware = mtk_spi_prepare_hardware;
 	master->prepare_message = mtk_spi_prepare_message;
 	master->transfer_one = mtk_spi_transfer_one;
 	master->can_dma = mtk_spi_can_dma;
@@ -576,13 +563,6 @@
 		goto err_put_master;
 	}
 
-	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
-	if (IS_ERR(mdata->spi_clk)) {
-		ret = PTR_ERR(mdata->spi_clk);
-		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
-		goto err_put_master;
-	}
-
 	mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
 	if (IS_ERR(mdata->parent_clk)) {
 		ret = PTR_ERR(mdata->parent_clk);
@@ -590,13 +570,27 @@
 		goto err_put_master;
 	}
 
+	mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
+	if (IS_ERR(mdata->sel_clk)) {
+		ret = PTR_ERR(mdata->sel_clk);
+		dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
+		goto err_put_master;
+	}
+
+	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
+	if (IS_ERR(mdata->spi_clk)) {
+		ret = PTR_ERR(mdata->spi_clk);
+		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
+		goto err_put_master;
+	}
+
 	ret = clk_prepare_enable(mdata->spi_clk);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
 		goto err_put_master;
 	}
 
-	ret = clk_set_parent(mdata->spi_clk, mdata->parent_clk);
+	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
 		goto err_disable_clk;
@@ -630,7 +624,6 @@
 	pm_runtime_disable(&pdev->dev);
 
 	mtk_spi_reset(mdata);
-	clk_disable_unprepare(mdata->spi_clk);
 	spi_master_put(master);
 
 	return 0;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index fdd79197..a8ef38e 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -654,6 +654,10 @@
 	if (!(sccr1_reg & SSCR1_TIE))
 		mask &= ~SSSR_TFS;
 
+	/* Ignore RX timeout interrupt if it is disabled */
+	if (!(sccr1_reg & SSCR1_TINTE))
+		mask &= ~SSSR_TINT;
+
 	if (!(status & mask))
 		return IRQ_NONE;
 
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index 2e32ea2..be6155c 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -34,13 +34,13 @@
 static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
 				      unsigned addr, u32 val)
 {
-	iowrite32(val, spi->regs + addr);
+	__raw_writel(val, spi->regs + addr);
 }
 
 static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
 					     unsigned addr)
 {
-	return ioread32(spi->regs + addr);
+	return __raw_readl(spi->regs + addr);
 }
 
 static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3abb390..a5f53de 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1610,8 +1610,7 @@
  *
  * The caller is responsible for assigning the bus number and initializing
  * the master's methods before calling spi_register_master(); and (after errors
- * adding the device) calling spi_master_put() and kfree() to prevent a memory
- * leak.
+ * adding the device) calling spi_master_put() to prevent a memory leak.
  */
 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
 {
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index fba92a5..ef008e5 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -651,7 +651,8 @@
 		kfree(spidev->rx_buffer);
 		spidev->rx_buffer = NULL;
 
-		spidev->speed_hz = spidev->spi->max_speed_hz;
+		if (spidev->spi)
+			spidev->speed_hz = spidev->spi->max_speed_hz;
 
 		/* ... after we unbound from the underlying device? */
 		spin_lock_irq(&spidev->spi_lock);
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index bdfb3c8..4a3cf9b 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -451,7 +451,7 @@
 	}
 }
 
-static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc)
+static void pmic_arb_chained_irq(struct irq_desc *desc)
 {
 	struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 20288fc..8f3ac37 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -5,5 +5,25 @@
 	- add proper arch dependencies as needed
 	- audit userspace interfaces to make sure they are sane
 
+
+ion/
+ - Remove ION_IOC_SYNC: Flushing for devices should be purely a kernel internal
+   interface on top of dma-buf. flush_for_device needs to be added to dma-buf
+   first.
+ - Remove ION_IOC_CUSTOM: Currently used for cache flushing for CPU access in
+   some vendor trees. Should be replaced with an ioctl on the dma-buf to
+   expose the begin/end_cpu_access hooks to userspace.
+ - Clarify the tricks ion plays with explicitly managing coherency behind the
+   dma api's back (this is absolutely needed for high-perf gpu drivers): Add an
+   explicit coherency management mode to flush_for_device to be used by drivers
+   which want to manage caches themselves and which indicates whether cpu caches
+   need flushing.
+ - With those removed there's probably no use for ION_IOC_IMPORT anymore either
+   since ion would just be the central allocator for shared buffers.
+ - Add dt-binding to expose cma regions as ion heaps, with the rule that any
+   such cma regions must already be used by some device for dma. I.e. ion only
+   exposes existing cma regions and doesn't reserve memory unnecessarily when
+   booting a system which doesn't use ion.
+
 Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
 Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 217aa53..6e8d839 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1179,13 +1179,13 @@
 		mutex_unlock(&client->lock);
 		goto end;
 	}
-	mutex_unlock(&client->lock);
 
 	handle = ion_handle_create(client, buffer);
-	if (IS_ERR(handle))
+	if (IS_ERR(handle)) {
+		mutex_unlock(&client->lock);
 		goto end;
+	}
 
-	mutex_lock(&client->lock);
 	ret = ion_handle_add(client, handle);
 	mutex_unlock(&client->lock);
 	if (ret) {
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index 81df77b..9c41652 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -91,7 +91,7 @@
 		.pdev		= &lcdc0_device,
 		.clocks		= lcdc0_clocks,
 		.nclocks	= ARRAY_SIZE(lcdc0_clocks),
-		.domain		= "a4lc",
+		.domain		= "/system-controller@e6180000/pm-domains/c5/a4lc@1"
 	},
 };
 
diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
index 29d456e..3eb5eb8 100644
--- a/drivers/staging/board/board.c
+++ b/drivers/staging/board/board.c
@@ -135,6 +135,40 @@
 	return error;
 }
 
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+static int board_staging_add_dev_domain(struct platform_device *pdev,
+					const char *domain)
+{
+	struct of_phandle_args pd_args;
+	struct generic_pm_domain *pd;
+	struct device_node *np;
+
+	np = of_find_node_by_path(domain);
+	if (!np) {
+		pr_err("Cannot find domain node %s\n", domain);
+		return -ENOENT;
+	}
+
+	pd_args.np = np;
+	pd_args.args_count = 0;
+	pd = of_genpd_get_from_provider(&pd_args);
+	if (IS_ERR(pd)) {
+		pr_err("Cannot find genpd %s (%ld)\n", domain, PTR_ERR(pd));
+		return PTR_ERR(pd);
+
+	}
+	pr_debug("Found genpd %s for device %s\n", pd->name, pdev->name);
+
+	return pm_genpd_add_device(pd, &pdev->dev);
+}
+#else
+static inline int board_staging_add_dev_domain(struct platform_device *pdev,
+					       const char *domain)
+{
+	return 0;
+}
+#endif
+
 int __init board_staging_register_device(const struct board_staging_dev *dev)
 {
 	struct platform_device *pdev = dev->pdev;
@@ -161,7 +195,7 @@
 	}
 
 	if (dev->domain)
-		__pm_genpd_name_add_device(dev->domain, &pdev->dev, NULL);
+		board_staging_add_dev_domain(pdev, dev->domain);
 
 	return error;
 }
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index 32f3a9d..5cafa50 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -76,7 +76,7 @@
 
 	/* Set CS active high */
 	par->spi->mode |= SPI_CS_HIGH;
-	ret = par->spi->master->setup(par->spi);
+	ret = spi_setup(par->spi);
 	if (ret) {
 		dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
 		return ret;
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
index 88fb2c0..8eae6ef 100644
--- a/drivers/staging/fbtft/fb_watterott.c
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -169,7 +169,7 @@
 	/* enable SPI interface by having CS and MOSI low during reset */
 	save_mode = par->spi->mode;
 	par->spi->mode |= SPI_CS_HIGH;
-	ret = par->spi->master->setup(par->spi); /* set CS inactive low */
+	ret = spi_setup(par->spi); /* set CS inactive low */
 	if (ret) {
 		dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
 		return ret;
@@ -180,7 +180,7 @@
 	par->fbtftops.reset(par);
 	mdelay(1000);
 	par->spi->mode = save_mode;
-	ret = par->spi->master->setup(par->spi);
+	ret = spi_setup(par->spi);
 	if (ret) {
 		dev_err(par->info->device, "Could not restore SPI mode\n");
 		return ret;
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 23392eb..7f5fa3d 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -1436,15 +1436,11 @@
 
 	/* 9-bit SPI setup */
 	if (par->spi && display->buswidth == 9) {
-		par->spi->bits_per_word = 9;
-		ret = par->spi->master->setup(par->spi);
-		if (ret) {
+		if (par->spi->master->bits_per_word_mask & SPI_BPW_MASK(9)) {
+			par->spi->bits_per_word = 9;
+		} else {
 			dev_warn(&par->spi->dev,
 				"9-bit SPI not available, emulating using 8-bit.\n");
-			par->spi->bits_per_word = 8;
-			ret = par->spi->master->setup(par->spi);
-			if (ret)
-				goto out_release;
 			/* allocate buffer with room for dc bits */
 			par->extra = devm_kzalloc(par->info->device,
 				par->txbuf.len + (par->txbuf.len / 8) + 8,
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index c763efc..3f380a0 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -463,15 +463,12 @@
 			}
 			par->fbtftops.write_register = fbtft_write_reg8_bus9;
 			par->fbtftops.write_vmem = fbtft_write_vmem16_bus9;
-			sdev->bits_per_word = 9;
-			ret = sdev->master->setup(sdev);
-			if (ret) {
+			if (par->spi->master->bits_per_word_mask
+			    & SPI_BPW_MASK(9)) {
+				par->spi->bits_per_word = 9;
+			} else {
 				dev_warn(dev,
 					"9-bit SPI not available, emulating using 8-bit.\n");
-				sdev->bits_per_word = 8;
-				ret = sdev->master->setup(sdev);
-				if (ret)
-					goto out_release;
 				/* allocate buffer with room for dc bits */
 				par->extra = devm_kzalloc(par->info->device,
 						par->txbuf.len + (par->txbuf.len / 8) + 8,
diff --git a/drivers/staging/lustre/README.txt b/drivers/staging/lustre/README.txt
index cf0ca50..0676243 100644
--- a/drivers/staging/lustre/README.txt
+++ b/drivers/staging/lustre/README.txt
@@ -14,10 +14,8 @@
 Lustre has independent Metadata and Data servers that clients can access
 in parallel to maximize performance.
 
-In order to use Lustre client you will need to download lustre client
-tools from
-https://downloads.hpdd.intel.com/public/lustre/latest-feature-release/
-the package name is lustre-client.
+To use the Lustre client you will need to download the "lustre-client"
+package that contains the userspace tools from http://lustre.org/download/
 
 You will need to install and configure your Lustre servers separately.
 
@@ -76,12 +74,10 @@
 
 More Information
 ================
-You can get more information at
-OpenSFS website: http://lustre.opensfs.org/about/
-Intel HPDD wiki: https://wiki.hpdd.intel.com
+You can get more information at the Lustre website: http://wiki.lustre.org/
 
-Out of tree Lustre client and server code is available at:
-http://git.whamcloud.com/fs/lustre-release.git
+Source for the userspace tools and out-of-tree client and server code
+is available at: http://git.hpdd.intel.com/fs/lustre-release.git
 
 Latest binary packages:
-http://lustre.opensfs.org/download-lustre/
+http://lustre.org/download/
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index d50de03..0b9b9b5 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -1,5 +1,6 @@
 menuconfig MOST
         tristate "MOST driver"
+	depends on HAS_DMA
         select MOSTCORE
         default n
         ---help---
diff --git a/drivers/staging/most/hdm-dim2/Kconfig b/drivers/staging/most/hdm-dim2/Kconfig
index 1d4ad1d..fc54876 100644
--- a/drivers/staging/most/hdm-dim2/Kconfig
+++ b/drivers/staging/most/hdm-dim2/Kconfig
@@ -5,6 +5,7 @@
 config HDM_DIM2
 	tristate "DIM2 HDM"
 	depends on AIM_NETWORK
+	depends on HAS_IOMEM
 
 	---help---
 	  Say Y here if you want to connect via MediaLB to network transceiver.
diff --git a/drivers/staging/most/hdm-usb/Kconfig b/drivers/staging/most/hdm-usb/Kconfig
index a482c3f..ec15463 100644
--- a/drivers/staging/most/hdm-usb/Kconfig
+++ b/drivers/staging/most/hdm-usb/Kconfig
@@ -4,7 +4,7 @@
 
 config HDM_USB
 	tristate "USB HDM"
-	depends on USB
+	depends on USB && NET
 	select AIM_NETWORK
 	---help---
 	  Say Y here if you want to connect via USB to network tranceiver.
diff --git a/drivers/staging/most/mostcore/Kconfig b/drivers/staging/most/mostcore/Kconfig
index 38abf1b..4717254 100644
--- a/drivers/staging/most/mostcore/Kconfig
+++ b/drivers/staging/most/mostcore/Kconfig
@@ -4,6 +4,7 @@
 
 config MOSTCORE
 	tristate "MOST Core"
+	depends on HAS_DMA
 
 	---help---
 	  Say Y here if you want to enable MOST support.
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
index cf5fe9b..d7f6235 100644
--- a/drivers/staging/rdma/Kconfig
+++ b/drivers/staging/rdma/Kconfig
@@ -24,6 +24,8 @@
 
 source "drivers/staging/rdma/amso1100/Kconfig"
 
+source "drivers/staging/rdma/ehca/Kconfig"
+
 source "drivers/staging/rdma/hfi1/Kconfig"
 
 source "drivers/staging/rdma/ipath/Kconfig"
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
index cbd915a..139d78e 100644
--- a/drivers/staging/rdma/Makefile
+++ b/drivers/staging/rdma/Makefile
@@ -1,4 +1,5 @@
 # Entries for RDMA_STAGING tree
 obj-$(CONFIG_INFINIBAND_AMSO1100)	+= amso1100/
+obj-$(CONFIG_INFINIBAND_EHCA)	+= ehca/
 obj-$(CONFIG_INFINIBAND_HFI1)	+= hfi1/
 obj-$(CONFIG_INFINIBAND_IPATH)	+= ipath/
diff --git a/drivers/infiniband/hw/ehca/Kconfig b/drivers/staging/rdma/ehca/Kconfig
similarity index 69%
rename from drivers/infiniband/hw/ehca/Kconfig
rename to drivers/staging/rdma/ehca/Kconfig
index 59f807d..3fadd2a 100644
--- a/drivers/infiniband/hw/ehca/Kconfig
+++ b/drivers/staging/rdma/ehca/Kconfig
@@ -2,7 +2,8 @@
 	tristate "eHCA support"
 	depends on IBMEBUS
 	---help---
-	This driver supports the IBM pSeries eHCA InfiniBand adapter.
+	This driver supports the deprecated IBM pSeries eHCA InfiniBand
+	adapter.
 
 	To compile the driver as a module, choose M here. The module
 	will be called ib_ehca.
diff --git a/drivers/infiniband/hw/ehca/Makefile b/drivers/staging/rdma/ehca/Makefile
similarity index 100%
rename from drivers/infiniband/hw/ehca/Makefile
rename to drivers/staging/rdma/ehca/Makefile
diff --git a/drivers/staging/rdma/ehca/TODO b/drivers/staging/rdma/ehca/TODO
new file mode 100644
index 0000000..199a4a6
--- /dev/null
+++ b/drivers/staging/rdma/ehca/TODO
@@ -0,0 +1,4 @@
+9/2015
+
+The ehca driver has been deprecated and moved to drivers/staging/rdma.
+It will be removed in the 4.6 merge window.
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/staging/rdma/ehca/ehca_av.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_av.c
rename to drivers/staging/rdma/ehca/ehca_av.c
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/staging/rdma/ehca/ehca_classes.h
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_classes.h
rename to drivers/staging/rdma/ehca/ehca_classes.h
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
rename to drivers/staging/rdma/ehca/ehca_classes_pSeries.h
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/staging/rdma/ehca/ehca_cq.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_cq.c
rename to drivers/staging/rdma/ehca/ehca_cq.c
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/staging/rdma/ehca/ehca_eq.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_eq.c
rename to drivers/staging/rdma/ehca/ehca_eq.c
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/staging/rdma/ehca/ehca_hca.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_hca.c
rename to drivers/staging/rdma/ehca/ehca_hca.c
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/staging/rdma/ehca/ehca_irq.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_irq.c
rename to drivers/staging/rdma/ehca/ehca_irq.c
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/staging/rdma/ehca/ehca_irq.h
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_irq.h
rename to drivers/staging/rdma/ehca/ehca_irq.h
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_iverbs.h
rename to drivers/staging/rdma/ehca/ehca_iverbs.h
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_main.c
rename to drivers/staging/rdma/ehca/ehca_main.c
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/staging/rdma/ehca/ehca_mcast.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_mcast.c
rename to drivers/staging/rdma/ehca/ehca_mcast.c
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_mrmw.c
rename to drivers/staging/rdma/ehca/ehca_mrmw.c
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/staging/rdma/ehca/ehca_mrmw.h
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_mrmw.h
rename to drivers/staging/rdma/ehca/ehca_mrmw.h
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/staging/rdma/ehca/ehca_pd.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_pd.c
rename to drivers/staging/rdma/ehca/ehca_pd.c
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/staging/rdma/ehca/ehca_qes.h
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_qes.h
rename to drivers/staging/rdma/ehca/ehca_qes.h
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/staging/rdma/ehca/ehca_qp.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_qp.c
rename to drivers/staging/rdma/ehca/ehca_qp.c
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_reqs.c
rename to drivers/staging/rdma/ehca/ehca_reqs.c
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/staging/rdma/ehca/ehca_sqp.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_sqp.c
rename to drivers/staging/rdma/ehca/ehca_sqp.c
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/staging/rdma/ehca/ehca_tools.h
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_tools.h
rename to drivers/staging/rdma/ehca/ehca_tools.h
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/staging/rdma/ehca/ehca_uverbs.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ehca_uverbs.c
rename to drivers/staging/rdma/ehca/ehca_uverbs.c
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/staging/rdma/ehca/hcp_if.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/hcp_if.c
rename to drivers/staging/rdma/ehca/hcp_if.c
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/staging/rdma/ehca/hcp_if.h
similarity index 100%
rename from drivers/infiniband/hw/ehca/hcp_if.h
rename to drivers/staging/rdma/ehca/hcp_if.h
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/staging/rdma/ehca/hcp_phyp.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/hcp_phyp.c
rename to drivers/staging/rdma/ehca/hcp_phyp.c
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/staging/rdma/ehca/hcp_phyp.h
similarity index 100%
rename from drivers/infiniband/hw/ehca/hcp_phyp.h
rename to drivers/staging/rdma/ehca/hcp_phyp.h
diff --git a/drivers/infiniband/hw/ehca/hipz_fns.h b/drivers/staging/rdma/ehca/hipz_fns.h
similarity index 100%
rename from drivers/infiniband/hw/ehca/hipz_fns.h
rename to drivers/staging/rdma/ehca/hipz_fns.h
diff --git a/drivers/infiniband/hw/ehca/hipz_fns_core.h b/drivers/staging/rdma/ehca/hipz_fns_core.h
similarity index 100%
rename from drivers/infiniband/hw/ehca/hipz_fns_core.h
rename to drivers/staging/rdma/ehca/hipz_fns_core.h
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/staging/rdma/ehca/hipz_hw.h
similarity index 100%
rename from drivers/infiniband/hw/ehca/hipz_hw.h
rename to drivers/staging/rdma/ehca/hipz_hw.h
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/staging/rdma/ehca/ipz_pt_fn.c
similarity index 100%
rename from drivers/infiniband/hw/ehca/ipz_pt_fn.c
rename to drivers/staging/rdma/ehca/ipz_pt_fn.c
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/staging/rdma/ehca/ipz_pt_fn.h
similarity index 100%
rename from drivers/infiniband/hw/ehca/ipz_pt_fn.h
rename to drivers/staging/rdma/ehca/ipz_pt_fn.h
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c
index 654eafe..aa58e59 100644
--- a/drivers/staging/rdma/hfi1/chip.c
+++ b/drivers/staging/rdma/hfi1/chip.c
@@ -2710,7 +2710,7 @@
 	if (sleep_ok) {
 		mutex_lock(&ppd->hls_lock);
 	} else {
-		while (mutex_trylock(&ppd->hls_lock) == EBUSY)
+		while (!mutex_trylock(&ppd->hls_lock))
 			udelay(1);
 	}
 
@@ -2758,7 +2758,7 @@
 	if (sleep_ok) {
 		mutex_lock(&dd->pport->hls_lock);
 	} else {
-		while (mutex_trylock(&dd->pport->hls_lock) == EBUSY)
+		while (!mutex_trylock(&dd->pport->hls_lock))
 			udelay(1);
 	}
 
diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/staging/rdma/hfi1/device.c
index 07c87a87..bc26a53 100644
--- a/drivers/staging/rdma/hfi1/device.c
+++ b/drivers/staging/rdma/hfi1/device.c
@@ -57,11 +57,13 @@
 #include "device.h"
 
 static struct class *class;
+static struct class *user_class;
 static dev_t hfi1_dev;
 
 int hfi1_cdev_init(int minor, const char *name,
 		   const struct file_operations *fops,
-		   struct cdev *cdev, struct device **devp)
+		   struct cdev *cdev, struct device **devp,
+		   bool user_accessible)
 {
 	const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor);
 	struct device *device = NULL;
@@ -78,7 +80,11 @@
 		goto done;
 	}
 
-	device = device_create(class, NULL, dev, NULL, "%s", name);
+	if (user_accessible)
+		device = device_create(user_class, NULL, dev, NULL, "%s", name);
+	else
+		device = device_create(class, NULL, dev, NULL, "%s", name);
+
 	if (!IS_ERR(device))
 		goto done;
 	ret = PTR_ERR(device);
@@ -110,6 +116,26 @@
 	return hfi1_class_name;
 }
 
+static char *hfi1_devnode(struct device *dev, umode_t *mode)
+{
+	if (mode)
+		*mode = 0600;
+	return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+}
+
+static const char *hfi1_class_name_user = "hfi1_user";
+const char *class_name_user(void)
+{
+	return hfi1_class_name_user;
+}
+
+static char *hfi1_user_devnode(struct device *dev, umode_t *mode)
+{
+	if (mode)
+		*mode = 0666;
+	return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+}
+
 int __init dev_init(void)
 {
 	int ret;
@@ -125,7 +151,22 @@
 		ret = PTR_ERR(class);
 		pr_err("Could not create device class (err %d)\n", -ret);
 		unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
+		goto done;
 	}
+	class->devnode = hfi1_devnode;
+
+	user_class = class_create(THIS_MODULE, class_name_user());
+	if (IS_ERR(user_class)) {
+		ret = PTR_ERR(user_class);
+		pr_err("Could not create device class for user accessible files (err %d)\n",
+		       -ret);
+		class_destroy(class);
+		class = NULL;
+		user_class = NULL;
+		unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
+		goto done;
+	}
+	user_class->devnode = hfi1_user_devnode;
 
 done:
 	return ret;
@@ -133,10 +174,11 @@
 
 void dev_cleanup(void)
 {
-	if (class) {
-		class_destroy(class);
-		class = NULL;
-	}
+	class_destroy(class);
+	class = NULL;
+
+	class_destroy(user_class);
+	user_class = NULL;
 
 	unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
 }
diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/staging/rdma/hfi1/device.h
index 98caecd..2850ff7 100644
--- a/drivers/staging/rdma/hfi1/device.h
+++ b/drivers/staging/rdma/hfi1/device.h
@@ -52,7 +52,8 @@
 
 int hfi1_cdev_init(int minor, const char *name,
 		   const struct file_operations *fops,
-		   struct cdev *cdev, struct device **devp);
+		   struct cdev *cdev, struct device **devp,
+		   bool user_accessible);
 void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp);
 const char *class_name(void);
 int __init dev_init(void);
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c
index 6777d6b..3e8d5ac 100644
--- a/drivers/staging/rdma/hfi1/diag.c
+++ b/drivers/staging/rdma/hfi1/diag.c
@@ -292,7 +292,7 @@
 	if (atomic_inc_return(&diagpkt_count) == 1) {
 		ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name,
 				     &diagpkt_file_ops, &diagpkt_cdev,
-				     &diagpkt_device);
+				     &diagpkt_device, false);
 	}
 
 	return ret;
@@ -592,7 +592,8 @@
 
 	ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name,
 			     &snoop_file_ops,
-			     &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev);
+			     &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev,
+			     false);
 
 	if (ret) {
 		dd_dev_err(dd, "Couldn't create %s device: %d", name, ret);
@@ -1012,11 +1013,10 @@
 		case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA:
 			memset(&link_info, 0, sizeof(link_info));
 
-			ret = copy_from_user(&link_info,
+			if (copy_from_user(&link_info,
 				(struct hfi1_link_info __user *)arg,
-				sizeof(link_info));
-			if (ret)
-				break;
+				sizeof(link_info)))
+				ret = -EFAULT;
 
 			value = link_info.port_state;
 			index = link_info.port_number;
@@ -1080,9 +1080,10 @@
 		case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA:
 			if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) {
 				memset(&link_info, 0, sizeof(link_info));
-				ret = copy_from_user(&link_info,
+				if (copy_from_user(&link_info,
 					(struct hfi1_link_info __user *)arg,
-					sizeof(link_info));
+					sizeof(link_info)))
+					ret = -EFAULT;
 				index = link_info.port_number;
 			} else {
 				ret = __get_user(index, (int __user *) arg);
@@ -1114,9 +1115,10 @@
 							ppd->link_speed_active;
 				link_info.link_width_active =
 							ppd->link_width_active;
-				ret = copy_to_user(
+				if (copy_to_user(
 					(struct hfi1_link_info __user *)arg,
-					&link_info, sizeof(link_info));
+					&link_info, sizeof(link_info)))
+					ret = -EFAULT;
 			} else {
 				ret = __put_user(value, (int __user *)arg);
 			}
@@ -1142,10 +1144,9 @@
 			snoop_dbg("Setting filter");
 			/* just copy command structure */
 			argp = (unsigned long *)arg;
-			ret = copy_from_user(&filter_cmd, (void __user *)argp,
-					     sizeof(filter_cmd));
-			if (ret < 0) {
-				pr_alert("Error copying filter command\n");
+			if (copy_from_user(&filter_cmd, (void __user *)argp,
+					     sizeof(filter_cmd))) {
+				ret = -EFAULT;
 				break;
 			}
 			if (filter_cmd.opcode >= HFI1_MAX_FILTERS) {
@@ -1167,12 +1168,11 @@
 				break;
 			}
 			/* copy remaining data from userspace */
-			ret = copy_from_user((u8 *)filter_value,
+			if (copy_from_user((u8 *)filter_value,
 					(void __user *)filter_cmd.value_ptr,
-					filter_cmd.length);
-			if (ret < 0) {
+					filter_cmd.length)) {
 				kfree(filter_value);
-				pr_alert("Error copying filter data\n");
+				ret = -EFAULT;
 				break;
 			}
 			/* Drain packets first */
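The conversions in this ioctl handler rely on copy_from_user()/copy_to_user()
returning the number of bytes left uncopied rather than a negative errno, so
the old "ret < 0" tests could never trigger. The usual idiom, sketched with
the names used above:

	if (copy_from_user(&link_info, (void __user *)arg, sizeof(link_info)))
		return -EFAULT;		/* any non-zero remainder means a fault */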
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index 4698617..72d3850 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -1181,6 +1181,7 @@
 	struct hfi1_filedata *fd = fp->private_data;
 	int ret = 0;
 
+	memset(&cinfo, 0, sizeof(cinfo));
 	ret = hfi1_get_base_kinfo(uctxt, &cinfo);
 	if (ret < 0)
 		goto done;
@@ -2089,14 +2090,16 @@
 
 	if (atomic_inc_return(&user_count) == 1) {
 		ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
-				     &wildcard_cdev, &wildcard_device);
+				     &wildcard_cdev, &wildcard_device,
+				     true);
 		if (ret)
 			goto done;
 	}
 
 	snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
 	ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
-			     &dd->user_cdev, &dd->user_device);
+			     &dd->user_cdev, &dd->user_device,
+			     true);
 	if (ret)
 		goto done;
 
@@ -2104,7 +2107,8 @@
 		snprintf(name, sizeof(name),
 			 "%s_ui%d", class_name(), dd->unit);
 		ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
-				     &dd->ui_cdev, &dd->ui_device);
+				     &dd->ui_cdev, &dd->ui_device,
+				     false);
 		if (ret)
 			goto done;
 	}
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c
index 37269eb..b2c1b72 100644
--- a/drivers/staging/rdma/hfi1/mad.c
+++ b/drivers/staging/rdma/hfi1/mad.c
@@ -1717,9 +1717,9 @@
 	psi->port_states.portphysstate_portstate =
 		(hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
 	psi->link_width_downgrade_tx_active =
-	  ppd->link_width_downgrade_tx_active;
+		cpu_to_be16(ppd->link_width_downgrade_tx_active);
 	psi->link_width_downgrade_rx_active =
-	  ppd->link_width_downgrade_rx_active;
+		cpu_to_be16(ppd->link_width_downgrade_rx_active);
 	if (resp_len)
 		*resp_len += sizeof(struct opa_port_state_info);
 
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c
index a8c903c..aecd1a7 100644
--- a/drivers/staging/rdma/hfi1/sdma.c
+++ b/drivers/staging/rdma/hfi1/sdma.c
@@ -737,7 +737,7 @@
 	 */
 	if (!is_power_of_2(count))
 		return SDMA_DESCQ_CNT;
-	if (count < 64 && count > 32768)
+	if (count < 64 || count > 32768)
 		return SDMA_DESCQ_CNT;
 	return count;
 }
@@ -1848,7 +1848,7 @@
 			dd_dev_err(sde->dd,
 				"\taidx: %u amode: %u alen: %u\n",
 				(u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
-					>> SDMA_DESC1_HEADER_INDEX_MASK),
+					>> SDMA_DESC1_HEADER_INDEX_SHIFT),
 				(u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
 					>> SDMA_DESC1_HEADER_MODE_SHIFT),
 				(u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK)
@@ -1926,7 +1926,7 @@
 		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
 			seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
 				(u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
-					>> SDMA_DESC1_HEADER_INDEX_MASK),
+					>> SDMA_DESC1_HEADER_INDEX_SHIFT),
 				(u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
 					>> SDMA_DESC1_HEADER_MODE_SHIFT));
 		head = (head + 1) & sde->sdma_mask;
diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h
index 1e613fc..4960869 100644
--- a/drivers/staging/rdma/hfi1/sdma.h
+++ b/drivers/staging/rdma/hfi1/sdma.h
@@ -109,53 +109,53 @@
 /*
  * Bits defined in the send DMA descriptor.
  */
-#define SDMA_DESC0_FIRST_DESC_FLAG      (1ULL<<63)
-#define SDMA_DESC0_LAST_DESC_FLAG       (1ULL<<62)
+#define SDMA_DESC0_FIRST_DESC_FLAG      (1ULL << 63)
+#define SDMA_DESC0_LAST_DESC_FLAG       (1ULL << 62)
 #define SDMA_DESC0_BYTE_COUNT_SHIFT     48
 #define SDMA_DESC0_BYTE_COUNT_WIDTH     14
 #define SDMA_DESC0_BYTE_COUNT_MASK \
-	((1ULL<<SDMA_DESC0_BYTE_COUNT_WIDTH)-1ULL)
+	((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
 #define SDMA_DESC0_BYTE_COUNT_SMASK \
-	(SDMA_DESC0_BYTE_COUNT_MASK<<SDMA_DESC0_BYTE_COUNT_SHIFT)
+	(SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
 #define SDMA_DESC0_PHY_ADDR_SHIFT       0
 #define SDMA_DESC0_PHY_ADDR_WIDTH       48
 #define SDMA_DESC0_PHY_ADDR_MASK \
-	((1ULL<<SDMA_DESC0_PHY_ADDR_WIDTH)-1ULL)
+	((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
 #define SDMA_DESC0_PHY_ADDR_SMASK \
-	(SDMA_DESC0_PHY_ADDR_MASK<<SDMA_DESC0_PHY_ADDR_SHIFT)
+	(SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)
 
 #define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
 #define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
 #define SDMA_DESC1_HEADER_UPDATE1_MASK \
-	((1ULL<<SDMA_DESC1_HEADER_UPDATE1_WIDTH)-1ULL)
+	((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
 #define SDMA_DESC1_HEADER_UPDATE1_SMASK \
-	(SDMA_DESC1_HEADER_UPDATE1_MASK<<SDMA_DESC1_HEADER_UPDATE1_SHIFT)
+	(SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
 #define SDMA_DESC1_HEADER_MODE_SHIFT    13
 #define SDMA_DESC1_HEADER_MODE_WIDTH    3
 #define SDMA_DESC1_HEADER_MODE_MASK \
-	((1ULL<<SDMA_DESC1_HEADER_MODE_WIDTH)-1ULL)
+	((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
 #define SDMA_DESC1_HEADER_MODE_SMASK \
-	(SDMA_DESC1_HEADER_MODE_MASK<<SDMA_DESC1_HEADER_MODE_SHIFT)
+	(SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
 #define SDMA_DESC1_HEADER_INDEX_SHIFT   8
 #define SDMA_DESC1_HEADER_INDEX_WIDTH   5
 #define SDMA_DESC1_HEADER_INDEX_MASK \
-	((1ULL<<SDMA_DESC1_HEADER_INDEX_WIDTH)-1ULL)
+	((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
 #define SDMA_DESC1_HEADER_INDEX_SMASK \
-	(SDMA_DESC1_HEADER_INDEX_MASK<<SDMA_DESC1_HEADER_INDEX_SHIFT)
+	(SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
 #define SDMA_DESC1_HEADER_DWS_SHIFT     4
 #define SDMA_DESC1_HEADER_DWS_WIDTH     4
 #define SDMA_DESC1_HEADER_DWS_MASK \
-	((1ULL<<SDMA_DESC1_HEADER_DWS_WIDTH)-1ULL)
+	((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
 #define SDMA_DESC1_HEADER_DWS_SMASK \
-	(SDMA_DESC1_HEADER_DWS_MASK<<SDMA_DESC1_HEADER_DWS_SHIFT)
+	(SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
 #define SDMA_DESC1_GENERATION_SHIFT     2
 #define SDMA_DESC1_GENERATION_WIDTH     2
 #define SDMA_DESC1_GENERATION_MASK \
-	((1ULL<<SDMA_DESC1_GENERATION_WIDTH)-1ULL)
+	((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
 #define SDMA_DESC1_GENERATION_SMASK \
-	(SDMA_DESC1_GENERATION_MASK<<SDMA_DESC1_GENERATION_SHIFT)
-#define SDMA_DESC1_INT_REQ_FLAG         (1ULL<<1)
-#define SDMA_DESC1_HEAD_TO_HOST_FLAG    (1ULL<<0)
+	(SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
+#define SDMA_DESC1_INT_REQ_FLAG         (1ULL << 1)
+#define SDMA_DESC1_HEAD_TO_HOST_FLAG    (1ULL << 0)
 
 enum sdma_states {
 	sdma_state_s00_hw_down,
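Each field above follows the same SHIFT/WIDTH/MASK/SMASK scheme, and the
earlier sdma.c fix (shifting by _SHIFT instead of _MASK) extracts a field as
in this sketch:

	/* pull the 5-bit AHG header index out of the second descriptor qword */
	u8 ahg_idx = (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
			  >> SDMA_DESC1_HEADER_INDEX_SHIFT);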
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c
index 53ac214..41bb59e 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/staging/rdma/hfi1/verbs.c
@@ -749,11 +749,13 @@
 	struct verbs_txreq *tx;
 
 	tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
-	if (!tx)
+	if (!tx) {
 		/* call slow path to get the lock */
 		tx =  __get_txreq(dev, qp);
-	if (tx)
-		tx->qp = qp;
+		if (IS_ERR(tx))
+			return tx;
+	}
+	tx->qp = qp;
 	return tx;
 }
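The change above follows the usual ERR_PTR convention: the slow path returns
an encoded errno instead of NULL, so callers must test with IS_ERR() before
dereferencing. A generic caller sketch (illustrative only):

	struct verbs_txreq *tx = get_txreq(dev, qp);

	if (IS_ERR(tx))
		return PTR_ERR(tx);	/* e.g. -EBUSY from the slow path */
	/* tx is a valid pointer here; tx->qp was set by get_txreq() */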
 
diff --git a/drivers/staging/unisys/visorbus/Makefile b/drivers/staging/unisys/visorbus/Makefile
index fa27ee5..fc790e7 100644
--- a/drivers/staging/unisys/visorbus/Makefile
+++ b/drivers/staging/unisys/visorbus/Makefile
@@ -10,4 +10,3 @@
 visorbus-y += periodic_work.o
 
 ccflags-y += -Idrivers/staging/unisys/include
-ccflags-y += -Idrivers/staging/unisys/visorutil
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 2309f5f..a272b48 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -37,6 +37,8 @@
 #define POLLJIFFIES_TESTWORK         100
 #define POLLJIFFIES_NORMALCHANNEL     10
 
+static int busreg_rc = -ENODEV; /* stores the result from bus registration */
+
 static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env);
 static int visorbus_match(struct device *xdev, struct device_driver *xdrv);
 static void fix_vbus_dev_info(struct visor_device *visordev);
@@ -863,6 +865,9 @@
 {
 	int rc = 0;
 
+	if (busreg_rc < 0)
+		return -ENODEV; /* can't register on a nonexistent bus */
+
 	drv->driver.name = drv->name;
 	drv->driver.bus = &visorbus_type;
 	drv->driver.probe = visordriver_probe_device;
@@ -885,6 +890,8 @@
 	if (rc < 0)
 		return rc;
 	rc = register_driver_attributes(drv);
+	if (rc < 0)
+		driver_unregister(&drv->driver);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
@@ -1260,10 +1267,8 @@
 static int
 create_bus_type(void)
 {
-	int rc = 0;
-
-	rc = bus_register(&visorbus_type);
-	return rc;
+	busreg_rc = bus_register(&visorbus_type);
+	return busreg_rc;
 }
 
 /** Remove the one-and-only one instance of the visor bus type (visorbus_type).
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 8c9da7e..9d3c1e2 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1189,16 +1189,16 @@
 	spin_lock_irqsave(&devdata->priv_lock, flags);
 	atomic_dec(&devdata->num_rcvbuf_in_iovm);
 
-	/* update rcv stats - call it with priv_lock held */
-	devdata->net_stats.rx_packets++;
-	devdata->net_stats.rx_bytes = skb->len;
-
 	/* set length to how much was ACTUALLY received -
 	 * NOTE: rcv_done_len includes actual length of data rcvd
 	 * including ethhdr
 	 */
 	skb->len = cmdrsp->net.rcv.rcv_done_len;
 
+	/* update rcv stats - call it with priv_lock held */
+	devdata->net_stats.rx_packets++;
+	devdata->net_stats.rx_bytes += skb->len;
+
 	/* test enabled while holding lock */
 	if (!(devdata->enabled && devdata->enab_dis_acked)) {
 		/* don't process it unless we're in enable mode and until
@@ -1924,13 +1924,16 @@
 			"%s debugfs_create_dir %s failed\n",
 			__func__, netdev->name);
 		err = -ENOMEM;
-		goto cleanup_xmit_cmdrsp;
+		goto cleanup_register_netdev;
 	}
 
 	dev_info(&dev->device, "%s success netdev=%s\n",
 		 __func__, netdev->name);
 	return 0;
 
+cleanup_register_netdev:
+	unregister_netdev(netdev);
+
 cleanup_napi_add:
 	del_timer_sync(&devdata->irq_poll_timer);
 	netif_napi_del(&devdata->napi);
@@ -2128,8 +2131,9 @@
 	if (!dev_num_pool)
 		goto cleanup_workqueue;
 
-	visorbus_register_visor_driver(&visornic_driver);
-	return 0;
+	err = visorbus_register_visor_driver(&visornic_driver);
+	if (!err)
+		return 0;
 
 cleanup_workqueue:
 	if (visornic_timeout_reset_workqueue) {
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index fd09290..342a07c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -269,14 +269,14 @@
 }
 
 bool iscsit_check_np_match(
-	struct __kernel_sockaddr_storage *sockaddr,
+	struct sockaddr_storage *sockaddr,
 	struct iscsi_np *np,
 	int network_transport)
 {
 	struct sockaddr_in *sock_in, *sock_in_e;
 	struct sockaddr_in6 *sock_in6, *sock_in6_e;
 	bool ip_match = false;
-	u16 port;
+	u16 port, port_e;
 
 	if (sockaddr->ss_family == AF_INET6) {
 		sock_in6 = (struct sockaddr_in6 *)sockaddr;
@@ -288,6 +288,7 @@
 			ip_match = true;
 
 		port = ntohs(sock_in6->sin6_port);
+		port_e = ntohs(sock_in6_e->sin6_port);
 	} else {
 		sock_in = (struct sockaddr_in *)sockaddr;
 		sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
@@ -296,9 +297,10 @@
 			ip_match = true;
 
 		port = ntohs(sock_in->sin_port);
+		port_e = ntohs(sock_in_e->sin_port);
 	}
 
-	if (ip_match && (np->np_port == port) &&
+	if (ip_match && (port_e == port) &&
 	    (np->np_network_transport == network_transport))
 		return true;
 
@@ -309,7 +311,7 @@
  * Called with mutex np_lock held
  */
 static struct iscsi_np *iscsit_get_np(
-	struct __kernel_sockaddr_storage *sockaddr,
+	struct sockaddr_storage *sockaddr,
 	int network_transport)
 {
 	struct iscsi_np *np;
@@ -340,12 +342,9 @@
 }
 
 struct iscsi_np *iscsit_add_np(
-	struct __kernel_sockaddr_storage *sockaddr,
-	char *ip_str,
+	struct sockaddr_storage *sockaddr,
 	int network_transport)
 {
-	struct sockaddr_in *sock_in;
-	struct sockaddr_in6 *sock_in6;
 	struct iscsi_np *np;
 	int ret;
 
@@ -368,16 +367,6 @@
 	}
 
 	np->np_flags |= NPF_IP_NETWORK;
-	if (sockaddr->ss_family == AF_INET6) {
-		sock_in6 = (struct sockaddr_in6 *)sockaddr;
-		snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
-		np->np_port = ntohs(sock_in6->sin6_port);
-	} else {
-		sock_in = (struct sockaddr_in *)sockaddr;
-		sprintf(np->np_ip, "%s", ip_str);
-		np->np_port = ntohs(sock_in->sin_port);
-	}
-
 	np->np_network_transport = network_transport;
 	spin_lock_init(&np->np_thread_lock);
 	init_completion(&np->np_restart_comp);
@@ -411,8 +400,8 @@
 	list_add_tail(&np->np_list, &g_np_list);
 	mutex_unlock(&np_lock);
 
-	pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
-		np->np_ip, np->np_port, np->np_transport->name);
+	pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
+		&np->np_sockaddr, np->np_transport->name);
 
 	return np;
 }
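Printing portals with %pISpc works for both address families: the printk
extension renders a struct sockaddr as "1.2.3.4:3260" or "[fe80::1]:3260",
which is why the separate np_ip/np_port copies can be dropped. A minimal
sketch:

	pr_debug("portal at %pISpc on %s\n",
		 &np->np_sockaddr, np->np_transport->name);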
@@ -481,8 +470,8 @@
 	list_del(&np->np_list);
 	mutex_unlock(&np_lock);
 
-	pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
-		np->np_ip, np->np_port, np->np_transport->name);
+	pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
+		&np->np_sockaddr, np->np_transport->name);
 
 	iscsit_put_transport(np->np_transport);
 	kfree(np);
@@ -1209,7 +1198,6 @@
 	u8 *pad_bytes)
 {
 	u32 data_crc;
-	u32 i;
 	struct scatterlist *sg;
 	unsigned int page_off;
 
@@ -1218,15 +1206,15 @@
 	sg = cmd->first_data_sg;
 	page_off = cmd->first_data_sg_off;
 
-	i = 0;
 	while (data_length) {
-		u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off));
+		u32 cur_len = min_t(u32, data_length, (sg->length - page_off));
 
-		crypto_hash_update(hash, &sg[i], cur_len);
+		crypto_hash_update(hash, sg, cur_len);
 
 		data_length -= cur_len;
 		page_off = 0;
-		i++;
+		/* iscsit_map_iovec has already checked for invalid sg pointers */
+		sg = sg_next(sg);
 	}
 
 	if (padding) {
@@ -2556,7 +2544,7 @@
 	cmd->stat_sn		= conn->stat_sn++;
 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
-	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
 	hdr->async_event	= ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
 	hdr->param1		= cpu_to_be16(cmd->logout_cid);
 	hdr->param2		= cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
@@ -2628,7 +2616,7 @@
 		hdr->statsn		= cpu_to_be32(0xFFFFFFFF);
 
 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
-	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
 	hdr->datasn		= cpu_to_be32(datain->data_sn);
 	hdr->offset		= cpu_to_be32(datain->offset);
 
@@ -2839,7 +2827,7 @@
 
 	iscsit_increment_maxcmdsn(cmd, conn->sess);
 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
-	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
 
 	pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
 		" 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
@@ -2902,7 +2890,7 @@
 		iscsit_increment_maxcmdsn(cmd, conn->sess);
 
 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
-	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
 
 	pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
 		" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
@@ -3049,7 +3037,7 @@
 	hdr->ttt		= cpu_to_be32(r2t->targ_xfer_tag);
 	hdr->statsn		= cpu_to_be32(conn->stat_sn);
 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
-	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
 	hdr->r2tsn		= cpu_to_be32(r2t->r2t_sn);
 	hdr->data_offset	= cpu_to_be32(r2t->offset);
 	hdr->data_length	= cpu_to_be32(r2t->xfer_len);
@@ -3202,7 +3190,7 @@
 
 	iscsit_increment_maxcmdsn(cmd, conn->sess);
 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
-	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
 
 	pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
 		" Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
@@ -3321,7 +3309,7 @@
 
 	iscsit_increment_maxcmdsn(cmd, conn->sess);
 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
-	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
 
 	pr_debug("Built Task Management Response ITT: 0x%08x,"
 		" StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
@@ -3399,6 +3387,7 @@
 	int target_name_printed;
 	unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
 	unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
+	bool active;
 
 	buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
 			 SENDTARGETS_BUF_LIMIT);
@@ -3452,19 +3441,18 @@
 			}
 
 			spin_lock(&tpg->tpg_state_lock);
-			if ((tpg->tpg_state == TPG_STATE_FREE) ||
-			    (tpg->tpg_state == TPG_STATE_INACTIVE)) {
-				spin_unlock(&tpg->tpg_state_lock);
-				continue;
-			}
+			active = (tpg->tpg_state == TPG_STATE_ACTIVE);
 			spin_unlock(&tpg->tpg_state_lock);
 
+			if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets)
+				continue;
+
 			spin_lock(&tpg->tpg_np_lock);
 			list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
 						tpg_np_list) {
 				struct iscsi_np *np = tpg_np->tpg_np;
 				bool inaddr_any = iscsit_check_inaddr_any(np);
-				char *fmt_str;
+				struct sockaddr_storage *sockaddr;
 
 				if (np->np_network_transport != network_transport)
 					continue;
@@ -3492,15 +3480,15 @@
 					}
 				}
 
-				if (np->np_sockaddr.ss_family == AF_INET6)
-					fmt_str = "TargetAddress=[%s]:%hu,%hu";
+				if (inaddr_any)
+					sockaddr = &conn->local_sockaddr;
 				else
-					fmt_str = "TargetAddress=%s:%hu,%hu";
+					sockaddr = &np->np_sockaddr;
 
-				len = sprintf(buf, fmt_str,
-					inaddr_any ? conn->local_ip : np->np_ip,
-					np->np_port,
-					tpg->tpgt);
+				len = sprintf(buf, "TargetAddress="
+					      "%pISpc,%hu",
+					      sockaddr,
+					      tpg->tpgt);
 				len += 1;
 
 				if ((len + payload_len) > buffer_len) {
@@ -3576,7 +3564,7 @@
 	 */
 	cmd->maxcmdsn_inc = 0;
 	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
-	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+	hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
 
 	pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
 		" Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
@@ -3654,7 +3642,7 @@
 	cmd->stat_sn		= conn->stat_sn++;
 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
-	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
 
 }
 EXPORT_SYMBOL(iscsit_build_reject);
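The data-CRC hunk above stops indexing a flat sg[] array and instead advances through the scatterlist with sg_next(), which also follows chained scatterlists correctly. A minimal sketch of that walk; every name outside the scatterlist API is illustrative:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

static u32 example_walk_sgl(struct scatterlist *sg, u32 data_length, u32 page_off)
{
	u32 consumed = 0;

	while (data_length) {
		u32 cur_len = min_t(u32, data_length, sg->length - page_off);

		/* a real caller would feed sg/cur_len to crypto_hash_update() here */
		consumed += cur_len;
		data_length -= cur_len;
		page_off = 0;
		sg = sg_next(sg);
	}
	return consumed;
}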
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 7d0f9c0..4cf2c0f 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -10,10 +10,10 @@
 extern void iscsit_login_kref_put(struct kref *);
 extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *,
 				struct iscsi_tpg_np *);
-extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
+extern bool iscsit_check_np_match(struct sockaddr_storage *,
 				struct iscsi_np *, int);
-extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
-				char *, int);
+extern struct iscsi_np *iscsit_add_np(struct sockaddr_storage *,
+				int);
 extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
 				struct iscsi_portal_group *, bool);
 extern int iscsit_del_np(struct iscsi_np *);
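The prototype changes above drop the separate char * IP string because portals are now rendered directly from struct sockaddr_storage via the printk %pIS extension used throughout this series. A hedged sketch of the specifier ('p' appends the port and brackets IPv6 addresses, 'c' prints IPv6 in compressed form):

#include <linux/printk.h>
#include <linux/socket.h>

static void example_log_portal(struct sockaddr_storage *ss)
{
	/* e.g. "192.0.2.1:3260" or "[2001:db8::1]:3260" */
	pr_debug("portal: %pISc, portal with port: %pISpc\n", ss, ss);
}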
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index c1898c8..c7461d7 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -99,7 +99,7 @@
 		 * Use existing np->np_sockaddr for SCTP network portal reference
 		 */
 		tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
-					np->np_ip, tpg_np, ISCSI_SCTP_TCP);
+					tpg_np, ISCSI_SCTP_TCP);
 		if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
 			goto out;
 	} else {
@@ -177,7 +177,7 @@
 		}
 
 		tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
-				np->np_ip, tpg_np, ISCSI_INFINIBAND);
+				tpg_np, ISCSI_INFINIBAND);
 		if (IS_ERR(tpg_np_iser)) {
 			rc = PTR_ERR(tpg_np_iser);
 			goto out;
@@ -220,7 +220,7 @@
 	struct iscsi_portal_group *tpg;
 	struct iscsi_tpg_np *tpg_np;
 	char *str, *str2, *ip_str, *port_str;
-	struct __kernel_sockaddr_storage sockaddr;
+	struct sockaddr_storage sockaddr;
 	struct sockaddr_in *sock_in;
 	struct sockaddr_in6 *sock_in6;
 	unsigned long port;
@@ -235,7 +235,7 @@
 	memset(buf, 0, MAX_PORTAL_LEN + 1);
 	snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
 
-	memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage));
+	memset(&sockaddr, 0, sizeof(struct sockaddr_storage));
 
 	str = strstr(buf, "[");
 	if (str) {
@@ -248,8 +248,8 @@
 			return ERR_PTR(-EINVAL);
 		}
 		str++; /* Skip over leading "[" */
-		*str2 = '\0'; /* Terminate the IPv6 address */
-		str2++; /* Skip over the "]" */
+		*str2 = '\0'; /* Terminate the unbracketed IPv6 address */
+		str2++; /* Skip over the \0 */
 		port_str = strstr(str2, ":");
 		if (!port_str) {
 			pr_err("Unable to locate \":port\""
@@ -267,7 +267,7 @@
 		sock_in6 = (struct sockaddr_in6 *)&sockaddr;
 		sock_in6->sin6_family = AF_INET6;
 		sock_in6->sin6_port = htons((unsigned short)port);
-		ret = in6_pton(str, IPV6_ADDRESS_SPACE,
+		ret = in6_pton(str, -1,
 				(void *)&sock_in6->sin6_addr.in6_u, -1, &end);
 		if (ret <= 0) {
 			pr_err("in6_pton returned: %d\n", ret);
@@ -316,7 +316,7 @@
 	 * sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
 	 *
 	 */
-	tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
+	tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, NULL,
 				ISCSI_TCP);
 	if (IS_ERR(tpg_np)) {
 		iscsit_put_tpg(tpg);
@@ -344,8 +344,8 @@
 
 	se_tpg = &tpg->tpg_se_tpg;
 	pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
-		" PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
-		tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
+		" PORTAL: %pISpc\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+		tpg->tpgt, &tpg_np->tpg_np->np_sockaddr);
 
 	ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
 	if (ret < 0)
@@ -656,6 +656,7 @@
 	struct iscsi_conn *conn;
 	struct se_session *se_sess;
 	ssize_t rb = 0;
+	u32 max_cmd_sn;
 
 	spin_lock_bh(&se_nacl->nacl_sess_lock);
 	se_sess = se_nacl->nacl_sess;
@@ -703,11 +704,12 @@
 				" Values]-----------------------\n");
 		rb += sprintf(page+rb, "  CmdSN/WR  :  CmdSN/WC  :  ExpCmdSN"
 				"  :  MaxCmdSN  :     ITT    :     TTT\n");
+		max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn);
 		rb += sprintf(page+rb, " 0x%08x   0x%08x   0x%08x   0x%08x"
 				"   0x%08x   0x%08x\n",
 			sess->cmdsn_window,
-			(sess->max_cmd_sn - sess->exp_cmd_sn) + 1,
-			sess->exp_cmd_sn, sess->max_cmd_sn,
+			(max_cmd_sn - sess->exp_cmd_sn) + 1,
+			sess->exp_cmd_sn, max_cmd_sn,
 			sess->init_task_tag, sess->targ_xfer_tag);
 		rb += sprintf(page+rb, "----------------------[iSCSI"
 				" Connections]-------------------------\n");
@@ -751,7 +753,7 @@
 				break;
 			}
 
-			rb += sprintf(page+rb, "   Address %s %s", conn->login_ip,
+			rb += sprintf(page+rb, "   Address %pISc %s", &conn->login_sockaddr,
 				(conn->network_transport == ISCSI_TCP) ?
 				"TCP" : "SCTP");
 			rb += sprintf(page+rb, "  StatSN: 0x%08x\n",
@@ -1010,6 +1012,11 @@
  */
 DEF_TPG_ATTRIB(fabric_prot_type);
 TPG_ATTR(fabric_prot_type, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_tpg_enabled_sendtargets
+ */
+DEF_TPG_ATTRIB(tpg_enabled_sendtargets);
+TPG_ATTR(tpg_enabled_sendtargets, S_IRUGO | S_IWUSR);
 
 static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
 	&iscsi_tpg_attrib_authentication.attr,
@@ -1024,6 +1031,7 @@
 	&iscsi_tpg_attrib_default_erl.attr,
 	&iscsi_tpg_attrib_t10_pi.attr,
 	&iscsi_tpg_attrib_fabric_prot_type.attr,
+	&iscsi_tpg_attrib_tpg_enabled_sendtargets.attr,
 	NULL,
 };
 
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 5fabcd3..0382fa2 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -47,19 +47,19 @@
 	 * core_set_queue_depth_for_node().
 	 */
 	sess->cmdsn_window = se_nacl->queue_depth;
-	sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1;
+	atomic_add(se_nacl->queue_depth - 1, &sess->max_cmd_sn);
 }
 
 void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
 {
+	u32 max_cmd_sn;
+
 	if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
 		return;
 
 	cmd->maxcmdsn_inc = 1;
 
-	mutex_lock(&sess->cmdsn_mutex);
-	sess->max_cmd_sn += 1;
-	pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
-	mutex_unlock(&sess->cmdsn_mutex);
+	max_cmd_sn = atomic_inc_return(&sess->max_cmd_sn);
+	pr_debug("Updated MaxCmdSN to 0x%08x\n", max_cmd_sn);
 }
 EXPORT_SYMBOL(iscsit_increment_maxcmdsn);
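With max_cmd_sn converted from a mutex-protected u32 to an atomic_t, readers use atomic_read() and the producer uses atomic_inc_return(), so the debug print reports exactly the value it published. A minimal sketch of the pattern (names are illustrative):

#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/types.h>

static atomic_t example_sn = ATOMIC_INIT(0);

static u32 example_bump_sn(void)
{
	u32 new_sn = (u32) atomic_inc_return(&example_sn);

	pr_debug("Updated example SN to 0x%08x\n", new_sn);
	return new_sn;
}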
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 7e8f65e..96e78c8 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -331,7 +331,7 @@
 	 * The FFP CmdSN window values will be allocated from the TPG's
 	 * Initiator Node's ACL once the login has been successfully completed.
 	 */
-	sess->max_cmd_sn	= be32_to_cpu(pdu->cmdsn);
+	atomic_set(&sess->max_cmd_sn, be32_to_cpu(pdu->cmdsn));
 
 	sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
 	if (!sess->sess_ops) {
@@ -729,9 +729,9 @@
 			stop_timer = 1;
 		}
 
-		pr_debug("iSCSI Login successful on CID: %hu from %s to"
-			" %s:%hu,%hu\n", conn->cid, conn->login_ip,
-			conn->local_ip, conn->local_port, tpg->tpgt);
+		pr_debug("iSCSI Login successful on CID: %hu from %pISpc to"
+			" %pISpc,%hu\n", conn->cid, &conn->login_sockaddr,
+			&conn->local_sockaddr, tpg->tpgt);
 
 		list_add_tail(&conn->conn_list, &sess->sess_conn_list);
 		atomic_inc(&sess->nconn);
@@ -776,8 +776,8 @@
 	pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
 	sess->session_state = TARG_SESS_STATE_LOGGED_IN;
 
-	pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
-		conn->cid, conn->login_ip, conn->local_ip, conn->local_port,
+	pr_debug("iSCSI Login successful on CID: %hu from %pISpc to %pISpc,%hu\n",
+		conn->cid, &conn->login_sockaddr, &conn->local_sockaddr,
 		tpg->tpgt);
 
 	spin_lock_bh(&sess->conn_lock);
@@ -823,8 +823,8 @@
 	struct iscsi_np *np = (struct iscsi_np *) data;
 
 	spin_lock_bh(&np->np_thread_lock);
-	pr_err("iSCSI Login timeout on Network Portal %s:%hu\n",
-			np->np_ip, np->np_port);
+	pr_err("iSCSI Login timeout on Network Portal %pISpc\n",
+			&np->np_sockaddr);
 
 	if (np->np_login_timer_flags & ISCSI_TF_STOP) {
 		spin_unlock_bh(&np->np_thread_lock);
@@ -877,7 +877,7 @@
 
 int iscsit_setup_np(
 	struct iscsi_np *np,
-	struct __kernel_sockaddr_storage *sockaddr)
+	struct sockaddr_storage *sockaddr)
 {
 	struct socket *sock = NULL;
 	int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len;
@@ -916,7 +916,7 @@
 	 * in iscsi_target_configfs.c code..
 	 */
 	memcpy(&np->np_sockaddr, sockaddr,
-			sizeof(struct __kernel_sockaddr_storage));
+			sizeof(struct sockaddr_storage));
 
 	if (sockaddr->ss_family == AF_INET6)
 		len = sizeof(struct sockaddr_in6);
@@ -975,7 +975,7 @@
 
 int iscsi_target_setup_login_socket(
 	struct iscsi_np *np,
-	struct __kernel_sockaddr_storage *sockaddr)
+	struct sockaddr_storage *sockaddr)
 {
 	struct iscsit_transport *t;
 	int rc;
@@ -1015,44 +1015,42 @@
 		rc = conn->sock->ops->getname(conn->sock,
 				(struct sockaddr *)&sock_in6, &err, 1);
 		if (!rc) {
-			if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr))
-				snprintf(conn->login_ip, sizeof(conn->login_ip), "[%pI6c]",
-					&sock_in6.sin6_addr.in6_u);
-			else
-				snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
-					&sock_in6.sin6_addr.s6_addr32[3]);
-			conn->login_port = ntohs(sock_in6.sin6_port);
+			if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
+				memcpy(&conn->login_sockaddr, &sock_in6, sizeof(sock_in6));
+			} else {
+				/* Pretend to be an ipv4 socket */
+				sock_in.sin_family = AF_INET;
+				sock_in.sin_port = sock_in6.sin6_port;
+				memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4);
+				memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in));
+			}
 		}
 
 		rc = conn->sock->ops->getname(conn->sock,
 				(struct sockaddr *)&sock_in6, &err, 0);
 		if (!rc) {
-			if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr))
-				snprintf(conn->local_ip, sizeof(conn->local_ip), "[%pI6c]",
-					&sock_in6.sin6_addr.in6_u);
-			else
-				snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
-					&sock_in6.sin6_addr.s6_addr32[3]);
-			conn->local_port = ntohs(sock_in6.sin6_port);
+			if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
+				memcpy(&conn->local_sockaddr, &sock_in6, sizeof(sock_in6));
+			} else {
+				/* Pretend to be an ipv4 socket */
+				sock_in.sin_family = AF_INET;
+				sock_in.sin_port = sock_in6.sin6_port;
+				memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4);
+				memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in));
+			}
 		}
 	} else {
 		memset(&sock_in, 0, sizeof(struct sockaddr_in));
 
 		rc = conn->sock->ops->getname(conn->sock,
 				(struct sockaddr *)&sock_in, &err, 1);
-		if (!rc) {
-			sprintf(conn->login_ip, "%pI4",
-					&sock_in.sin_addr.s_addr);
-			conn->login_port = ntohs(sock_in.sin_port);
-		}
+		if (!rc)
+			memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in));
 
 		rc = conn->sock->ops->getname(conn->sock,
 				(struct sockaddr *)&sock_in, &err, 0);
-		if (!rc) {
-			sprintf(conn->local_ip, "%pI4",
-					&sock_in.sin_addr.s_addr);
-			conn->local_port = ntohs(sock_in.sin_port);
-		}
+		if (!rc)
+			memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in));
 	}
 
 	return 0;
@@ -1302,8 +1300,8 @@
 	spin_lock_bh(&np->np_thread_lock);
 	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
 		spin_unlock_bh(&np->np_thread_lock);
-		pr_err("iSCSI Network Portal on %s:%hu currently not"
-			" active.\n", np->np_ip, np->np_port);
+		pr_err("iSCSI Network Portal on %pISpc currently not"
+			" active.\n", &np->np_sockaddr);
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
 		goto new_sess_out;
@@ -1312,9 +1310,9 @@
 
 	conn->network_transport = np->np_network_transport;
 
-	pr_debug("Received iSCSI login request from %s on %s Network"
-		" Portal %s:%hu\n", conn->login_ip, np->np_transport->name,
-		conn->local_ip, conn->local_port);
+	pr_debug("Received iSCSI login request from %pISpc on %s Network"
+		" Portal %pISpc\n", &conn->login_sockaddr, np->np_transport->name,
+		&conn->local_sockaddr);
 
 	pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
 	conn->conn_state	= TARG_CONN_STATE_IN_LOGIN;
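The getname() handling above keeps full struct sockaddr_storage copies instead of formatted strings, and normalises v4-mapped IPv6 peers (::ffff:a.b.c.d) into plain AF_INET sockaddrs. A sketch of that normalisation, assuming the same v4-mapped convention (the helper name is illustrative):

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <net/ipv6.h>

static void example_store_peer(struct sockaddr_storage *dst,
			       const struct sockaddr_in6 *src6)
{
	if (ipv6_addr_v4mapped(&src6->sin6_addr)) {
		struct sockaddr_in sin = {
			.sin_family = AF_INET,
			.sin_port   = src6->sin6_port,
		};

		memcpy(&sin.sin_addr, &src6->sin6_addr.s6_addr32[3], 4);
		memcpy(dst, &sin, sizeof(sin));
	} else {
		memcpy(dst, src6, sizeof(*src6));
	}
}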
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 57aa0d0..b597aa2 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -5,9 +5,9 @@
 extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
 extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
 extern int iscsit_setup_np(struct iscsi_np *,
-				struct __kernel_sockaddr_storage *);
+				struct sockaddr_storage *);
 extern int iscsi_target_setup_login_socket(struct iscsi_np *,
-				struct __kernel_sockaddr_storage *);
+				struct sockaddr_storage *);
 extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
 extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
 extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index f9cde91..5c964c0 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -341,7 +341,6 @@
 static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
 {
 	u32 padding = 0;
-	struct iscsi_session *sess = conn->sess;
 	struct iscsi_login_rsp *login_rsp;
 
 	login_rsp = (struct iscsi_login_rsp *) login->rsp;
@@ -353,7 +352,7 @@
 	login_rsp->itt			= login->init_task_tag;
 	login_rsp->statsn		= cpu_to_be32(conn->stat_sn++);
 	login_rsp->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
-	login_rsp->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	login_rsp->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
 
 	pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
 		" ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
@@ -382,10 +381,6 @@
 		goto err;
 
 	login->rsp_length		= 0;
-	mutex_lock(&sess->cmdsn_mutex);
-	login_rsp->exp_cmdsn		= cpu_to_be32(sess->exp_cmd_sn);
-	login_rsp->max_cmdsn		= cpu_to_be32(sess->max_cmd_sn);
-	mutex_unlock(&sess->cmdsn_mutex);
 
 	return 0;
 
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index e8a52f7..51d1734 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -407,6 +407,7 @@
 			TYPERANGE_UTF8, USE_INITIAL_ONLY);
 	if (!param)
 		goto out;
+
 	/*
 	 * Extra parameters for ISER from RFC-5046
 	 */
@@ -496,9 +497,9 @@
 		} else if (!strcmp(param->name, SESSIONTYPE)) {
 			SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, IFMARKER)) {
-			SET_PSTATE_NEGOTIATE(param);
+			SET_PSTATE_REJECT(param);
 		} else if (!strcmp(param->name, OFMARKER)) {
-			SET_PSTATE_NEGOTIATE(param);
+			SET_PSTATE_REJECT(param);
 		} else if (!strcmp(param->name, IFMARKINT)) {
 			SET_PSTATE_REJECT(param);
 		} else if (!strcmp(param->name, OFMARKINT)) {
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 5e1349a..9dd94ff 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -430,7 +430,7 @@
 	int ret;
 
 	spin_lock(&lstat->lock);
-	ret = snprintf(page, PAGE_SIZE, "%s\n", lstat->last_intr_fail_ip_addr);
+	ret = snprintf(page, PAGE_SIZE, "%pISc\n", &lstat->last_intr_fail_sockaddr);
 	spin_unlock(&lstat->lock);
 
 	return ret;
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index cf59c39..11320df 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -50,7 +50,7 @@
 		pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
 			" %hu.\n", hdr->rtt, conn->cid);
 		return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
-			iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ?
+			iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), (u32) atomic_read(&conn->sess->max_cmd_sn))) ?
 			ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
 	}
 	if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 968068f..23c95cd 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -226,6 +226,7 @@
 	a->default_erl = TA_DEFAULT_ERL;
 	a->t10_pi = TA_DEFAULT_T10_PI;
 	a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
+	a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
 }
 
 int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -430,7 +431,7 @@
 
 static bool iscsit_tpg_check_network_portal(
 	struct iscsi_tiqn *tiqn,
-	struct __kernel_sockaddr_storage *sockaddr,
+	struct sockaddr_storage *sockaddr,
 	int network_transport)
 {
 	struct iscsi_portal_group *tpg;
@@ -459,8 +460,7 @@
 
 struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
 	struct iscsi_portal_group *tpg,
-	struct __kernel_sockaddr_storage *sockaddr,
-	char *ip_str,
+	struct sockaddr_storage *sockaddr,
 	struct iscsi_tpg_np *tpg_np_parent,
 	int network_transport)
 {
@@ -470,8 +470,8 @@
 	if (!tpg_np_parent) {
 		if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
 				network_transport)) {
-			pr_err("Network Portal: %s already exists on a"
-				" different TPG on %s\n", ip_str,
+			pr_err("Network Portal: %pISc already exists on a"
+				" different TPG on %s\n", sockaddr,
 				tpg->tpg_tiqn->tiqn);
 			return ERR_PTR(-EEXIST);
 		}
@@ -484,7 +484,7 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	np = iscsit_add_np(sockaddr, ip_str, network_transport);
+	np = iscsit_add_np(sockaddr, network_transport);
 	if (IS_ERR(np)) {
 		kfree(tpg_np);
 		return ERR_CAST(np);
@@ -514,8 +514,8 @@
 		spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
 	}
 
-	pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
-		tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+	pr_debug("CORE[%s] - Added Network Portal: %pISpc,%hu on %s\n",
+		tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt,
 		np->np_transport->name);
 
 	return tpg_np;
@@ -528,8 +528,8 @@
 {
 	iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
 
-	pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
-		tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+	pr_debug("CORE[%s] - Removed Network Portal: %pISpc,%hu on %s\n",
+		tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt,
 		np->np_transport->name);
 
 	tpg_np->tpg_np = NULL;
@@ -892,3 +892,21 @@
 
 	return 0;
 }
+
+int iscsit_ta_tpg_enabled_sendtargets(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->tpg_enabled_sendtargets = flag;
+	pr_debug("iSCSI_TPG[%hu] - TPG enabled bit required for SendTargets:"
+		" %s\n", tpg->tpgt, (a->tpg_enabled_sendtargets) ? "ON" : "OFF");
+
+	return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 95ff5bd..9db32bd 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -22,7 +22,7 @@
 extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
 extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
 extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
-			struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *,
+			struct sockaddr_storage *, struct iscsi_tpg_np *,
 			int);
 extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
 			struct iscsi_tpg_np *);
@@ -40,5 +40,6 @@
 extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32);
 
 #endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index a2bff07..428b0d9 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -233,6 +233,7 @@
 
 static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
 {
+	u32 max_cmdsn;
 	int ret;
 
 	/*
@@ -241,10 +242,10 @@
 	 * or order CmdSNs due to multiple connection sessions and/or
 	 * CRC failures.
 	 */
-	if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
+	max_cmdsn = atomic_read(&sess->max_cmd_sn);
+	if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
 		pr_err("Received CmdSN: 0x%08x is greater than"
-		       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn,
-		       sess->max_cmd_sn);
+		       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
 		ret = CMDSN_MAXCMDSN_OVERRUN;
 
 	} else if (cmdsn == sess->exp_cmd_sn) {
@@ -1371,6 +1372,33 @@
 	return iscsit_do_tx_data(conn, &c);
 }
 
+static bool sockaddr_equal(struct sockaddr_storage *x, struct sockaddr_storage *y)
+{
+	switch (x->ss_family) {
+	case AF_INET: {
+		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
+		struct sockaddr_in *siny = (struct sockaddr_in *)y;
+		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
+			return false;
+		if (sinx->sin_port != siny->sin_port)
+			return false;
+		break;
+	}
+	case AF_INET6: {
+		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
+		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
+		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
+			return false;
+		if (sinx->sin6_port != siny->sin6_port)
+			return false;
+		break;
+	}
+	default:
+		return false;
+	}
+	return true;
+}
+
 void iscsit_collect_login_stats(
 	struct iscsi_conn *conn,
 	u8 status_class,
@@ -1387,7 +1415,7 @@
 	ls = &tiqn->login_stats;
 
 	spin_lock(&ls->lock);
-	if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
+	if (sockaddr_equal(&conn->login_sockaddr, &ls->last_intr_fail_sockaddr) &&
 	    ((get_jiffies_64() - ls->last_fail_time) < 10)) {
 		/* We already have the failure info for this login */
 		spin_unlock(&ls->lock);
@@ -1427,8 +1455,7 @@
 
 		ls->last_intr_fail_ip_family = conn->login_family;
 
-		snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
-				"%s", conn->login_ip);
+		ls->last_intr_fail_sockaddr = conn->login_sockaddr;
 		ls->last_fail_time = get_jiffies_64();
 	}
 
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index a556bde..5bc85ff 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -526,7 +526,7 @@
 static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
 {
 	/*
-	 * Return the passed NAA identifier for the SAS Target Port
+	 * Return the passed NAA identifier for the Target Port
 	 */
 	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
 }
@@ -845,7 +845,7 @@
 		transport_free_session(tl_nexus->se_sess);
 		goto out;
 	}
-	/* Now, register the SAS I_T Nexus as active. */
+	/* Now, register the I_T Nexus as active. */
 	transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
 			tl_nexus->se_sess, tl_nexus);
 	tl_tpg->tl_nexus = tl_nexus;
@@ -884,7 +884,7 @@
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
 		tl_nexus->se_sess->se_node_acl->initiatorname);
 	/*
-	 * Release the SCSI I_T Nexus to the emulated SAS Target Port
+	 * Release the SCSI I_T Nexus to the emulated Target Port
 	 */
 	transport_deregister_session(tl_nexus->se_sess);
 	tpg->tl_nexus = NULL;
@@ -1034,6 +1034,11 @@
 	}
 	if (!strncmp(page, "offline", 7)) {
 		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
+		if (tl_tpg->tl_nexus) {
+			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
+
+			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
+		}
 		return count;
 	}
 	return -EINVAL;
@@ -1077,7 +1082,7 @@
 	tl_tpg->tl_hba = tl_hba;
 	tl_tpg->tl_tpgt = tpgt;
 	/*
-	 * Register the tl_tpg as a emulated SAS TCM Target Endpoint
+	 * Register the tl_tpg as an emulated TCM Target Endpoint
 	 */
 	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
 	if (ret < 0)
@@ -1102,11 +1107,11 @@
 	tl_hba = tl_tpg->tl_hba;
 	tpgt = tl_tpg->tl_tpgt;
 	/*
-	 * Release the I_T Nexus for the Virtual SAS link if present
+	 * Release the I_T Nexus for the Virtual target link if present
 	 */
 	tcm_loop_drop_nexus(tl_tpg);
 	/*
-	 * Deregister the tl_tpg as a emulated SAS TCM Target Endpoint
+	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
 	 */
 	core_tpg_deregister(se_tpg);
 
@@ -1199,8 +1204,9 @@
 				struct tcm_loop_hba, tl_hba_wwn);
 
 	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
-		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
-		tl_hba->tl_wwn_address, tl_hba->sh->host_no);
+		" %s Address: %s at Linux/SCSI Host ID: %d\n",
+		tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
+		tl_hba->sh->host_no);
 	/*
 	 * Call device_unregister() on the original tl_hba->dev.
 	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 09e682b..88ea4e4 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -62,22 +62,13 @@
 	struct se_session *se_sess = se_cmd->se_sess;
 	struct se_node_acl *nacl = se_sess->se_node_acl;
 	struct se_dev_entry *deve;
+	sense_reason_t ret = TCM_NO_SENSE;
 
 	rcu_read_lock();
 	deve = target_nacl_find_deve(nacl, unpacked_lun);
 	if (deve) {
 		atomic_long_inc(&deve->total_cmds);
 
-		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
-		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
-			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
-				" Access for 0x%08llx\n",
-				se_cmd->se_tfo->get_fabric_name(),
-				unpacked_lun);
-			rcu_read_unlock();
-			return TCM_WRITE_PROTECTED;
-		}
-
 		if (se_cmd->data_direction == DMA_TO_DEVICE)
 			atomic_long_add(se_cmd->data_length,
 					&deve->write_bytes);
@@ -93,6 +84,17 @@
 
 		percpu_ref_get(&se_lun->lun_ref);
 		se_cmd->lun_ref_active = true;
+
+		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+				" Access for 0x%08llx\n",
+				se_cmd->se_tfo->get_fabric_name(),
+				unpacked_lun);
+			rcu_read_unlock();
+			ret = TCM_WRITE_PROTECTED;
+			goto ref_dev;
+		}
 	}
 	rcu_read_unlock();
 
@@ -109,12 +111,6 @@
 				unpacked_lun);
 			return TCM_NON_EXISTENT_LUN;
 		}
-		/*
-		 * Force WRITE PROTECT for virtual LUN 0
-		 */
-		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
-		    (se_cmd->data_direction != DMA_NONE))
-			return TCM_WRITE_PROTECTED;
 
 		se_lun = se_sess->se_tpg->tpg_virt_lun0;
 		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
@@ -123,6 +119,15 @@
 
 		percpu_ref_get(&se_lun->lun_ref);
 		se_cmd->lun_ref_active = true;
+
+		/*
+		 * Force WRITE PROTECT for virtual LUN 0
+		 */
+		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+		    (se_cmd->data_direction != DMA_NONE)) {
+			ret = TCM_WRITE_PROTECTED;
+			goto ref_dev;
+		}
 	}
 	/*
 	 * RCU reference protected by percpu se_lun->lun_ref taken above that
@@ -130,6 +135,7 @@
 	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
 	 * target_core_fabric_configfs.c:target_fabric_port_release
 	 */
+ref_dev:
 	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 	atomic_long_inc(&se_cmd->se_dev->num_cmds);
 
@@ -140,7 +146,7 @@
 		atomic_long_add(se_cmd->data_length,
 				&se_cmd->se_dev->read_bytes);
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(transport_lookup_cmd_lun);
 
@@ -427,8 +433,6 @@
 
 	hlist_del_rcu(&orig->link);
 	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
-	rcu_assign_pointer(orig->se_lun, NULL);
-	rcu_assign_pointer(orig->se_lun_acl, NULL);
 	orig->lun_flags = 0;
 	orig->creation_time = 0;
 	orig->attach_count--;
@@ -439,6 +443,9 @@
 	kref_put(&orig->pr_kref, target_pr_kref_release);
 	wait_for_completion(&orig->pr_comp);
 
+	rcu_assign_pointer(orig->se_lun, NULL);
+	rcu_assign_pointer(orig->se_lun_acl, NULL);
+
 	kfree_rcu(orig, rcu_head);
 
 	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
@@ -620,8 +627,6 @@
 
 	lacl->mapped_lun = mapped_lun;
 	lacl->se_lun_nacl = nacl;
-	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
-		 nacl->initiatorname);
 
 	return lacl;
 }
@@ -656,7 +661,7 @@
 		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
 		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
-		lacl->initiatorname);
+		nacl->initiatorname);
 	/*
 	 * Check to see if there are any existing persistent reservation APTPL
 	 * pre-registrations that need to be enabled for this LUN ACL..
@@ -688,7 +693,7 @@
 		" InitiatorNode: %s Mapped LUN: %llu\n",
 		tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
-		lacl->initiatorname, lacl->mapped_lun);
+		nacl->initiatorname, lacl->mapped_lun);
 
 	return 0;
 }
@@ -701,7 +706,7 @@
 		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg),
 		tpg->se_tpg_tfo->get_fabric_name(),
-		lacl->initiatorname, lacl->mapped_lun);
+		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
 
 	kfree(lacl);
 }
@@ -754,7 +759,7 @@
 	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
 	dev->se_hba = hba;
 	dev->transport = hba->backend->ops;
-	dev->prot_length = sizeof(struct se_dif_v1_tuple);
+	dev->prot_length = sizeof(struct t10_pi_tuple);
 	dev->hba_index = hba->hba_index;
 
 	INIT_LIST_HEAD(&dev->dev_list);
@@ -771,7 +776,6 @@
 	spin_lock_init(&dev->se_tmr_lock);
 	spin_lock_init(&dev->qf_cmd_lock);
 	sema_init(&dev->caw_sem, 1);
-	atomic_set(&dev->dev_ordered_id, 0);
 	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
 	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
 	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 48a3698..be42429 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -203,7 +203,7 @@
 	pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
 		" Mapped LUN: %llu Write Protect bit to %s\n",
 		se_tpg->se_tpg_tfo->get_fabric_name(),
-		lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+		se_nacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
 
 	return count;
 
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index be9cefc..22390e0 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -184,3 +184,8 @@
 	kfree(hba);
 	return 0;
 }
+
+bool target_sense_desc_format(struct se_device *dev)
+{
+	return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
+}
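target_sense_desc_format() above selects descriptor-format sense whenever the backend exceeds U32_MAX logical blocks, since the fixed-format (0x70) INFORMATION field is only 32 bits wide and cannot carry a failing LBA beyond that. A hedged sketch using the scsi_common helpers the later hunks switch to (the key/ASC/ASCQ values are just an example):

#include <linux/types.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

static int example_fill_sense(u8 *buf, int buf_len, bool desc, u64 bad_lba)
{
	/* ABORTED COMMAND, LOGICAL BLOCK GUARD CHECK FAILED */
	scsi_build_sense_buffer(desc, buf, ABORTED_COMMAND, 0x10, 0x01);

	/* descriptor format can carry the full 64-bit failing LBA */
	return scsi_set_sense_information(buf, buf_len, bad_lba);
}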
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 5a9982f..0f19e11 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -105,6 +105,8 @@
 	mode = FMODE_READ|FMODE_EXCL;
 	if (!ib_dev->ibd_readonly)
 		mode |= FMODE_WRITE;
+	else
+		dev->dev_flags |= DF_READ_ONLY;
 
 	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
 	if (IS_ERR(bd)) {
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5ab7100..e793311 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -618,7 +618,7 @@
 	struct se_device *dev,
 	struct se_node_acl *nacl,
 	struct se_lun *lun,
-	struct se_dev_entry *deve,
+	struct se_dev_entry *dest_deve,
 	u64 mapped_lun,
 	unsigned char *isid,
 	u64 sa_res_key,
@@ -640,7 +640,29 @@
 	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
 	atomic_set(&pr_reg->pr_res_holders, 0);
 	pr_reg->pr_reg_nacl = nacl;
-	pr_reg->pr_reg_deve = deve;
+	/*
+	 * For destination registrations for ALL_TG_PT=1 and SPEC_I_PT=1,
+	 * the se_dev_entry->pr_ref will have been already obtained by
+	 * core_get_se_deve_from_rtpi() or __core_scsi3_alloc_registration().
+	 *
+	 * Otherwise, locate se_dev_entry now and obtain a reference until
+	 * registration completes in __core_scsi3_add_registration().
+	 */
+	if (dest_deve) {
+		pr_reg->pr_reg_deve = dest_deve;
+	} else {
+		rcu_read_lock();
+		pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
+		if (!pr_reg->pr_reg_deve) {
+			rcu_read_unlock();
+			pr_err("Unable to locate PR deve %s mapped_lun: %llu\n",
+				nacl->initiatorname, mapped_lun);
+			kmem_cache_free(t10_pr_reg_cache, pr_reg);
+			return NULL;
+		}
+		kref_get(&pr_reg->pr_reg_deve->pr_kref);
+		rcu_read_unlock();
+	}
 	pr_reg->pr_res_mapped_lun = mapped_lun;
 	pr_reg->pr_aptpl_target_lun = lun->unpacked_lun;
 	pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
@@ -936,17 +958,29 @@
 		    !(strcmp(pr_reg->pr_tport, t_port)) &&
 		     (pr_reg->pr_reg_tpgt == tpgt) &&
 		     (pr_reg->pr_aptpl_target_lun == target_lun)) {
+			/*
+			 * Obtain the ->pr_reg_deve pointer + reference, which
+			 * is released by __core_scsi3_add_registration() below.
+			 */
+			rcu_read_lock();
+			pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
+			if (!pr_reg->pr_reg_deve) {
+				pr_err("Unable to locate PR APTPL %s mapped_lun:"
+					" %llu\n", nacl->initiatorname, mapped_lun);
+				rcu_read_unlock();
+				continue;
+			}
+			kref_get(&pr_reg->pr_reg_deve->pr_kref);
+			rcu_read_unlock();
 
 			pr_reg->pr_reg_nacl = nacl;
 			pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
-
 			list_del(&pr_reg->pr_reg_aptpl_list);
 			spin_unlock(&pr_tmpl->aptpl_reg_lock);
 			/*
 			 * At this point all of the pointers in *pr_reg will
 			 * be setup, so go ahead and add the registration.
 			 */
-
 			__core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
 			/*
 			 * If this registration is the reservation holder,
@@ -1044,18 +1078,11 @@
 
 	__core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
 	spin_unlock(&pr_tmpl->registration_lock);
-
-	rcu_read_lock();
-	deve = pr_reg->pr_reg_deve;
-	if (deve)
-		set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
-	rcu_read_unlock();
-
 	/*
 	 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
 	 */
 	if (!pr_reg->pr_reg_all_tg_pt || register_move)
-		return;
+		goto out;
 	/*
 	 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
 	 * allocated in __core_scsi3_alloc_registration()
@@ -1075,19 +1102,31 @@
 		__core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp,
 					       register_type);
 		spin_unlock(&pr_tmpl->registration_lock);
-
+		/*
+		 * Drop configfs group dependency reference and deve->pr_kref
+		 * obtained from  __core_scsi3_alloc_registration() code.
+		 */
 		rcu_read_lock();
 		deve = pr_reg_tmp->pr_reg_deve;
-		if (deve)
+		if (deve) {
 			set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+			core_scsi3_lunacl_undepend_item(deve);
+			pr_reg_tmp->pr_reg_deve = NULL;
+		}
 		rcu_read_unlock();
-
-		/*
-		 * Drop configfs group dependency reference from
-		 * __core_scsi3_alloc_registration()
-		 */
-		core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
 	}
+out:
+	/*
+	 * Drop deve->pr_kref obtained in __core_scsi3_do_alloc_registration()
+	 */
+	rcu_read_lock();
+	deve = pr_reg->pr_reg_deve;
+	if (deve) {
+		set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+		kref_put(&deve->pr_kref, target_pr_kref_release);
+		pr_reg->pr_reg_deve = NULL;
+	}
+	rcu_read_unlock();
 }
 
 static int core_scsi3_alloc_registration(
@@ -1785,9 +1824,11 @@
 			dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
 			dest_se_deve->mapped_lun : 0);
 
-		if (!dest_se_deve)
+		if (!dest_se_deve) {
+			kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
+				 target_pr_kref_release);
 			continue;
-
+		}
 		core_scsi3_lunacl_undepend_item(dest_se_deve);
 		core_scsi3_nodeacl_undepend_item(dest_node_acl);
 		core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1823,9 +1864,11 @@
 
 		kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
 
-		if (!dest_se_deve)
+		if (!dest_se_deve) {
+			kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
+				 target_pr_kref_release);
 			continue;
-
+		}
 		core_scsi3_lunacl_undepend_item(dest_se_deve);
 		core_scsi3_nodeacl_undepend_item(dest_node_acl);
 		core_scsi3_tpg_undepend_item(dest_tpg);
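The registration changes above pair every se_dev_entry lookup with a kref_get() on deve->pr_kref and a later kref_put() whose release callback runs only when the last reference drops. A generic sketch of that kref pattern (all names are illustrative):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_entry {
	struct kref refcount;
};

static void example_release(struct kref *kref)
{
	struct example_entry *e = container_of(kref, struct example_entry, refcount);

	kfree(e);
}

static void example_get(struct example_entry *e)
{
	kref_get(&e->refcount);
}

static void example_put(struct example_entry *e)
{
	/* frees e only when this was the final reference */
	kref_put(&e->refcount, example_release);
}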
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index e318ddb..0b4b2a6 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -154,6 +154,38 @@
 	return 0;
 }
 
+static sense_reason_t
+sbc_emulate_startstop(struct se_cmd *cmd)
+{
+	unsigned char *cdb = cmd->t_task_cdb;
+
+	/*
+	 * See sbc3r36 section 5.25
+	 * Immediate bit should be set since there is nothing to complete
+	 * POWER CONDITION MODIFIER 0h
+	 */
+	if (!(cdb[1] & 1) || cdb[2] || cdb[3])
+		return TCM_INVALID_CDB_FIELD;
+
+	/*
+	 * See sbc3r36 section 5.25
+	 * POWER CONDITION 0h START_VALID - process START and LOEJ
+	 */
+	if (cdb[4] >> 4 & 0xf)
+		return TCM_INVALID_CDB_FIELD;
+
+	/*
+	 * See sbc3r36 section 5.25
+	 * LOEJ 0h - nothing to load or unload
+	 * START 1h - we are ready
+	 */
+	if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4))
+		return TCM_INVALID_CDB_FIELD;
+
+	target_complete_cmd(cmd, SAM_STAT_GOOD);
+	return 0;
+}
+
 sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
 {
 	u32 num_blocks;
@@ -960,6 +992,9 @@
 			       " than 1\n", sectors);
 			return TCM_INVALID_CDB_FIELD;
 		}
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
 		/*
 		 * Double size because we have two buffers, note that
 		 * zero is not an error..
@@ -1069,6 +1104,10 @@
 		size = 0;
 		cmd->execute_cmd = sbc_emulate_noop;
 		break;
+	case START_STOP:
+		size = 0;
+		cmd->execute_cmd = sbc_emulate_startstop;
+		break;
 	default:
 		ret = spc_parse_cdb(cmd, &size);
 		if (ret)
@@ -1191,7 +1230,7 @@
 sbc_dif_generate(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct se_dif_v1_tuple *sdt;
+	struct t10_pi_tuple *sdt;
 	struct scatterlist *dsg = cmd->t_data_sg, *psg;
 	sector_t sector = cmd->t_task_lba;
 	void *daddr, *paddr;
@@ -1203,7 +1242,7 @@
 		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
 
 		for (j = 0; j < psg->length;
-				j += sizeof(struct se_dif_v1_tuple)) {
+				j += sizeof(*sdt)) {
 			__u16 crc;
 			unsigned int avail;
 
@@ -1256,7 +1295,7 @@
 }
 
 static sense_reason_t
-sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt,
+sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt,
 		  __u16 crc, sector_t sector, unsigned int ei_lba)
 {
 	__be16 csum;
@@ -1346,7 +1385,7 @@
 	       unsigned int ei_lba, struct scatterlist *psg, int psg_off)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct se_dif_v1_tuple *sdt;
+	struct t10_pi_tuple *sdt;
 	struct scatterlist *dsg = cmd->t_data_sg;
 	sector_t sector = start;
 	void *daddr, *paddr;
@@ -1361,7 +1400,7 @@
 
 		for (i = psg_off; i < psg->length &&
 				sector < start + sectors;
-				i += sizeof(struct se_dif_v1_tuple)) {
+				i += sizeof(*sdt)) {
 			__u16 crc;
 			unsigned int avail;
 
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index f87d4ce..9413e1a 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -484,8 +484,8 @@
 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
-	int have_tp = 0;
-	int opt, min;
+	u32 mtl = 0;
+	int have_tp = 0, opt, min;
 
 	/*
 	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -516,8 +516,15 @@
 
 	/*
 	 * Set MAXIMUM TRANSFER LENGTH
+	 *
+	 * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
+	 * enforcing maximum HW scatter-gather-list entry limit
 	 */
-	put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
+	if (cmd->se_tfo->max_data_sg_nents) {
+		mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
+		       dev->dev_attrib.block_size;
+	}
+	put_unaligned_be32(min_not_zero(mtl, dev->dev_attrib.hw_max_sectors), &buf[8]);
 
 	/*
 	 * Set OPTIMAL TRANSFER LENGTH
@@ -768,7 +775,12 @@
 	if (pc == 1)
 		goto out;
 
-	p[2] = 2;
+	/* GLTSD: No implicit save of log parameters */
+	p[2] = (1 << 1);
+	if (target_sense_desc_format(dev))
+		/* D_SENSE: Descriptor format sense data for 64bit sectors */
+		p[2] |= (1 << 2);
+
 	/*
 	 * From spc4r23, 7.4.7 Control mode page
 	 *
@@ -1151,6 +1163,7 @@
 	unsigned char *rbuf;
 	u8 ua_asc = 0, ua_ascq = 0;
 	unsigned char buf[SE_SENSE_BUF];
+	bool desc_format = target_sense_desc_format(cmd->se_dev);
 
 	memset(buf, 0, SE_SENSE_BUF);
 
@@ -1164,32 +1177,11 @@
 	if (!rbuf)
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
-	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
-		/*
-		 * CURRENT ERROR, UNIT ATTENTION
-		 */
-		buf[0] = 0x70;
-		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
-
-		/*
-		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
-		 */
-		buf[SPC_ASC_KEY_OFFSET] = ua_asc;
-		buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
-		buf[7] = 0x0A;
-	} else {
-		/*
-		 * CURRENT ERROR, NO SENSE
-		 */
-		buf[0] = 0x70;
-		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
-
-		/*
-		 * NO ADDITIONAL SENSE INFORMATION
-		 */
-		buf[SPC_ASC_KEY_OFFSET] = 0x00;
-		buf[7] = 0x0A;
-	}
+	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))
+		scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION,
+					ua_asc, ua_ascq);
+	else
+		scsi_build_sense_buffer(desc_format, buf, NO_SENSE, 0x0, 0x0);
 
 	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
 	transport_kunmap_data_sg(cmd);
@@ -1418,9 +1410,6 @@
 		}
 		break;
 	default:
-		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
-			" 0x%02x, sending CHECK_CONDITION.\n",
-			cmd->se_tfo->get_fabric_name(), cdb[0]);
 		return TCM_UNSUPPORTED_SCSI_OPCODE;
 	}
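As a worked example for the MAXIMUM TRANSFER LENGTH change in spc_emulate_evpd_b0() above (illustrative values only): a fabric advertising max_data_sg_nents = 2048 with 4 KiB pages and a 512-byte logical block size reports mtl = (2048 * 4096) / 512 = 16384 blocks, i.e. 8 MiB per command, while min_not_zero() still lets a smaller non-zero hw_max_sectors win.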
 
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index babde4a..5fb9dd7 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -41,6 +41,7 @@
 #include "target_core_internal.h"
 #include "target_core_alua.h"
 #include "target_core_pr.h"
+#include "target_core_ua.h"
 
 extern struct se_device *g_lun0_dev;
 
@@ -83,6 +84,22 @@
 }
 EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
 
+void core_allocate_nexus_loss_ua(
+	struct se_node_acl *nacl)
+{
+	struct se_dev_entry *deve;
+
+	if (!nacl)
+		return;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+		core_scsi3_ua_allocate(deve, 0x29,
+			ASCQ_29H_NEXUS_LOSS_OCCURRED);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(core_allocate_nexus_loss_ua);
+
 /*	core_tpg_add_node_to_devs():
  *
  *
@@ -651,7 +668,10 @@
 	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
 	spin_unlock(&dev->se_port_lock);
 
-	lun->lun_access = lun_access;
+	if (dev->dev_flags & DF_READ_ONLY)
+		lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+	else
+		lun->lun_access = lun_access;
 	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
 		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
 	mutex_unlock(&tpg->tpg_lun_mutex);
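core_allocate_nexus_loss_ua() above walks the per-ACL deve list under rcu_read_lock() with hlist_for_each_entry_rcu(), relying on the entries being freed via kfree_rcu() elsewhere. A self-contained sketch of that reader pattern (structure and field names are illustrative):

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct example_deve {
	struct hlist_node link;
	int value;
};

static int example_sum(struct hlist_head *head)
{
	struct example_deve *deve;
	int sum = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, head, link)
		sum += deve->value;
	rcu_read_unlock();

	return sum;
}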
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ce8574b..5bacc7b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -39,6 +39,7 @@
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <scsi/scsi_proto.h>
+#include <scsi/scsi_common.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
@@ -1074,6 +1075,55 @@
 }
 EXPORT_SYMBOL(transport_set_vpd_ident);
 
+static sense_reason_t
+target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
+			       unsigned int size)
+{
+	u32 mtl;
+
+	if (!cmd->se_tfo->max_data_sg_nents)
+		return TCM_NO_SENSE;
+	/*
+	 * Check if fabric enforced maximum SGL entries per I/O descriptor
+	 * exceeds se_cmd->data_length.  If true, set SCF_UNDERFLOW_BIT +
+	 * residual_count and reduce original cmd->data_length to maximum
+	 * length based on single PAGE_SIZE entry scatter-lists.
+	 */
+	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
+	if (cmd->data_length > mtl) {
+		/*
+		 * If an existing CDB overflow is present, calculate new residual
+		 * based on CDB size minus fabric maximum transfer length.
+		 *
+		 * If an existing CDB underflow is present, calculate new residual
+		 * based on original cmd->data_length minus fabric maximum transfer
+		 * length.
+		 *
+		 * Otherwise, set the underflow residual based on cmd->data_length
+		 * minus fabric maximum transfer length.
+		 */
+		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+			cmd->residual_count = (size - mtl);
+		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+			u32 orig_dl = size + cmd->residual_count;
+			cmd->residual_count = (orig_dl - mtl);
+		} else {
+			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+			cmd->residual_count = (cmd->data_length - mtl);
+		}
+		cmd->data_length = mtl;
+		/*
+		 * Reset sbc_check_prot() calculated protection payload
+		 * length based upon the new smaller MTL.
+		 */
+		if (cmd->prot_length) {
+			u32 sectors = (mtl / dev->dev_attrib.block_size);
+			cmd->prot_length = dev->prot_length * sectors;
+		}
+	}
+	return TCM_NO_SENSE;
+}
+
 sense_reason_t
 target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 {
@@ -1087,9 +1137,9 @@
 			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
 				cmd->data_length, size, cmd->t_task_cdb[0]);
 
-		if (cmd->data_direction == DMA_TO_DEVICE) {
-			pr_err("Rejecting underflow/overflow"
-					" WRITE data\n");
+		if (cmd->data_direction == DMA_TO_DEVICE &&
+		    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+			pr_err("Rejecting underflow/overflow WRITE data\n");
 			return TCM_INVALID_CDB_FIELD;
 		}
 		/*
@@ -1119,7 +1169,7 @@
 		}
 	}
 
-	return 0;
+	return target_check_max_data_sg_nents(cmd, dev, size);
 
 }
 
@@ -1177,14 +1227,7 @@
 			" emulation is not supported\n");
 		return TCM_INVALID_CDB_FIELD;
 	}
-	/*
-	 * Used to determine when ORDERED commands should go from
-	 * Dormant to Active status.
-	 */
-	cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
-	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
-			cmd->se_ordered_id, cmd->sam_task_attr,
-			dev->transport->name);
+
 	return 0;
 }
 
@@ -1246,6 +1289,11 @@
 	}
 
 	ret = dev->transport->parse_cdb(cmd);
+	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
+		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
+				    cmd->se_tfo->get_fabric_name(),
+				    cmd->se_sess->se_node_acl->initiatorname,
+				    cmd->t_task_cdb[0]);
 	if (ret)
 		return ret;
 
@@ -1693,8 +1741,7 @@
 
 check_stop:
 	transport_lun_remove_cmd(cmd);
-	if (!transport_cmd_check_stop_to_fabric(cmd))
-		;
+	transport_cmd_check_stop_to_fabric(cmd);
 	return;
 
 queue_full:
@@ -1767,16 +1814,14 @@
 	 */
 	switch (cmd->sam_task_attr) {
 	case TCM_HEAD_TAG:
-		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
-			 "se_ordered_id: %u\n",
-			 cmd->t_task_cdb[0], cmd->se_ordered_id);
+		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
+			 cmd->t_task_cdb[0]);
 		return false;
 	case TCM_ORDERED_TAG:
 		atomic_inc_mb(&dev->dev_ordered_sync);
 
-		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
-			 " se_ordered_id: %u\n",
-			 cmd->t_task_cdb[0], cmd->se_ordered_id);
+		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
+			 cmd->t_task_cdb[0]);
 
 		/*
 		 * Execute an ORDERED command if no other older commands
@@ -1800,10 +1845,8 @@
 	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
 	spin_unlock(&dev->delayed_cmd_lock);
 
-	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
-		" delayed CMD list, se_ordered_id: %u\n",
-		cmd->t_task_cdb[0], cmd->sam_task_attr,
-		cmd->se_ordered_id);
+	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
+		cmd->t_task_cdb[0], cmd->sam_task_attr);
 	return true;
 }
 
@@ -1888,20 +1931,18 @@
 	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
 		atomic_dec_mb(&dev->simple_cmds);
 		dev->dev_cur_ordered_id++;
-		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
-			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
-			cmd->se_ordered_id);
+		pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
+			 dev->dev_cur_ordered_id);
 	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
 		dev->dev_cur_ordered_id++;
-		pr_debug("Incremented dev_cur_ordered_id: %u for"
-			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
-			cmd->se_ordered_id);
+		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
+			 dev->dev_cur_ordered_id);
 	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
 		atomic_dec_mb(&dev->dev_ordered_sync);
 
 		dev->dev_cur_ordered_id++;
-		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
-			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
+		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
+			 dev->dev_cur_ordered_id);
 	}
 
 	target_restart_delayed_cmds(dev);
@@ -2615,37 +2656,159 @@
 }
 EXPORT_SYMBOL(transport_wait_for_tasks);
 
-static int transport_get_sense_codes(
-	struct se_cmd *cmd,
-	u8 *asc,
-	u8 *ascq)
+struct sense_info {
+	u8 key;
+	u8 asc;
+	u8 ascq;
+	bool add_sector_info;
+};
+
+static const struct sense_info sense_info_table[] = {
+	[TCM_NO_SENSE] = {
+		.key = NOT_READY
+	},
+	[TCM_NON_EXISTENT_LUN] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
+	},
+	[TCM_UNSUPPORTED_SCSI_OPCODE] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
+	},
+	[TCM_SECTOR_COUNT_TOO_MANY] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
+	},
+	[TCM_UNKNOWN_MODE_PAGE] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x24, /* INVALID FIELD IN CDB */
+	},
+	[TCM_CHECK_CONDITION_ABORT_CMD] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
+		.ascq = 0x03,
+	},
+	[TCM_INCORRECT_AMOUNT_OF_DATA] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x0c, /* WRITE ERROR */
+		.ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
+	},
+	[TCM_INVALID_CDB_FIELD] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x24, /* INVALID FIELD IN CDB */
+	},
+	[TCM_INVALID_PARAMETER_LIST] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
+	},
+	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
+	},
+	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x0c, /* WRITE ERROR */
+		.ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
+	},
+	[TCM_SERVICE_CRC_ERROR] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
+		.ascq = 0x05, /* N/A */
+	},
+	[TCM_SNACK_REJECTED] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x11, /* READ ERROR */
+		.ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
+	},
+	[TCM_WRITE_PROTECTED] = {
+		.key = DATA_PROTECT,
+		.asc = 0x27, /* WRITE PROTECTED */
+	},
+	[TCM_ADDRESS_OUT_OF_RANGE] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
+	},
+	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
+		.key = UNIT_ATTENTION,
+	},
+	[TCM_CHECK_CONDITION_NOT_READY] = {
+		.key = NOT_READY,
+	},
+	[TCM_MISCOMPARE_VERIFY] = {
+		.key = MISCOMPARE,
+		.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
+		.ascq = 0x00,
+	},
+	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x10,
+		.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
+		.add_sector_info = true,
+	},
+	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x10,
+		.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+		.add_sector_info = true,
+	},
+	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
+		.key = ABORTED_COMMAND,
+		.asc = 0x10,
+		.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+		.add_sector_info = true,
+	},
+	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
+		/*
+		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
+		 * Solaris initiators.  Returning NOT READY instead means the
+		 * operations will be retried a finite number of times and we
+		 * can survive intermittent errors.
+		 */
+		.key = NOT_READY,
+		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
+	},
+};
+
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
 {
-	*asc = cmd->scsi_asc;
-	*ascq = cmd->scsi_ascq;
+	const struct sense_info *si;
+	u8 *buffer = cmd->sense_buffer;
+	int r = (__force int)reason;
+	u8 asc, ascq;
+	bool desc_format = target_sense_desc_format(cmd->se_dev);
+
+	if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
+		si = &sense_info_table[r];
+	else
+		si = &sense_info_table[(__force int)
+				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
+
+	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
+		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
+		WARN_ON_ONCE(asc == 0);
+	} else if (si->asc == 0) {
+		WARN_ON_ONCE(cmd->scsi_asc == 0);
+		asc = cmd->scsi_asc;
+		ascq = cmd->scsi_ascq;
+	} else {
+		asc = si->asc;
+		ascq = si->ascq;
+	}
+
+	scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
+	if (si->add_sector_info)
+		return scsi_set_sense_information(buffer,
+						  cmd->scsi_sense_length,
+						  cmd->bad_sector);
 
 	return 0;
 }
 
-static
-void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector)
-{
-	/* Place failed LBA in sense data information descriptor 0. */
-	buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
-	buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */
-	buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
-	buffer[SPC_VALIDITY_OFFSET] = 0x80;
-
-	/* Descriptor Information: failing sector */
-	put_unaligned_be64(bad_sector, &buffer[12]);
-}
-
 int
 transport_send_check_condition_and_sense(struct se_cmd *cmd,
 		sense_reason_t reason, int from_transport)
 {
-	unsigned char *buffer = cmd->sense_buffer;
 	unsigned long flags;
-	u8 asc = 0, ascq = 0;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
@@ -2655,243 +2818,17 @@
 	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	if (!reason && from_transport)
-		goto after_reason;
+	if (!from_transport) {
+		int rc;
 
-	if (!from_transport)
 		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
-
-	/*
-	 * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
-	 * SENSE KEY values from include/scsi/scsi.h
-	 */
-	switch (reason) {
-	case TCM_NO_SENSE:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* Not Ready */
-		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
-		/* NO ADDITIONAL SENSE INFORMATION */
-		buffer[SPC_ASC_KEY_OFFSET] = 0;
-		buffer[SPC_ASCQ_KEY_OFFSET] = 0;
-		break;
-	case TCM_NON_EXISTENT_LUN:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ILLEGAL REQUEST */
-		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
-		/* LOGICAL UNIT NOT SUPPORTED */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x25;
-		break;
-	case TCM_UNSUPPORTED_SCSI_OPCODE:
-	case TCM_SECTOR_COUNT_TOO_MANY:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ILLEGAL REQUEST */
-		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
-		/* INVALID COMMAND OPERATION CODE */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x20;
-		break;
-	case TCM_UNKNOWN_MODE_PAGE:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ILLEGAL REQUEST */
-		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
-		/* INVALID FIELD IN CDB */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x24;
-		break;
-	case TCM_CHECK_CONDITION_ABORT_CMD:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ABORTED COMMAND */
-		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
-		/* BUS DEVICE RESET FUNCTION OCCURRED */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x29;
-		buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
-		break;
-	case TCM_INCORRECT_AMOUNT_OF_DATA:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ABORTED COMMAND */
-		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
-		/* WRITE ERROR */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
-		/* NOT ENOUGH UNSOLICITED DATA */
-		buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d;
-		break;
-	case TCM_INVALID_CDB_FIELD:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ILLEGAL REQUEST */
-		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
-		/* INVALID FIELD IN CDB */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x24;
-		break;
-	case TCM_INVALID_PARAMETER_LIST:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ILLEGAL REQUEST */
-		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
-		/* INVALID FIELD IN PARAMETER LIST */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x26;
-		break;
-	case TCM_PARAMETER_LIST_LENGTH_ERROR:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ILLEGAL REQUEST */
-		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
-		/* PARAMETER LIST LENGTH ERROR */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
-		break;
-	case TCM_UNEXPECTED_UNSOLICITED_DATA:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ABORTED COMMAND */
-		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
-		/* WRITE ERROR */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
-		/* UNEXPECTED_UNSOLICITED_DATA */
-		buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c;
-		break;
-	case TCM_SERVICE_CRC_ERROR:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ABORTED COMMAND */
-		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
-		/* PROTOCOL SERVICE CRC ERROR */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x47;
-		/* N/A */
-		buffer[SPC_ASCQ_KEY_OFFSET] = 0x05;
-		break;
-	case TCM_SNACK_REJECTED:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ABORTED COMMAND */
-		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
-		/* READ ERROR */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x11;
-		/* FAILED RETRANSMISSION REQUEST */
-		buffer[SPC_ASCQ_KEY_OFFSET] = 0x13;
-		break;
-	case TCM_WRITE_PROTECTED:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* DATA PROTECT */
-		buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
-		/* WRITE PROTECTED */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x27;
-		break;
-	case TCM_ADDRESS_OUT_OF_RANGE:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ILLEGAL REQUEST */
-		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
-		/* LOGICAL BLOCK ADDRESS OUT OF RANGE */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x21;
-		break;
-	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* UNIT ATTENTION */
-		buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
-		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
-		buffer[SPC_ASC_KEY_OFFSET] = asc;
-		buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
-		break;
-	case TCM_CHECK_CONDITION_NOT_READY:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* Not Ready */
-		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
-		transport_get_sense_codes(cmd, &asc, &ascq);
-		buffer[SPC_ASC_KEY_OFFSET] = asc;
-		buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
-		break;
-	case TCM_MISCOMPARE_VERIFY:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE;
-		/* MISCOMPARE DURING VERIFY OPERATION */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
-		buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
-		break;
-	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ILLEGAL REQUEST */
-		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
-		/* LOGICAL BLOCK GUARD CHECK FAILED */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
-		buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
-		transport_err_sector_info(buffer, cmd->bad_sector);
-		break;
-	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ILLEGAL REQUEST */
-		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
-		/* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
-		buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
-		transport_err_sector_info(buffer, cmd->bad_sector);
-		break;
-	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ILLEGAL REQUEST */
-		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
-		/* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
-		buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
-		transport_err_sector_info(buffer, cmd->bad_sector);
-		break;
-	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
-	default:
-		/* CURRENT ERROR */
-		buffer[0] = 0x70;
-		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/*
-		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
-		 * Solaris initiators.  Returning NOT READY instead means the
-		 * operations will be retried a finite number of times and we
-		 * can survive intermittent errors.
-		 */
-		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
-		/* LOGICAL UNIT COMMUNICATION FAILURE */
-		buffer[SPC_ASC_KEY_OFFSET] = 0x08;
-		break;
+		cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+		cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER;
+		rc = translate_sense_reason(cmd, reason);
+		if (rc)
+			return rc;
 	}
-	/*
-	 * This code uses linux/include/scsi/scsi.h SAM status codes!
-	 */
-	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
-	/*
-	 * Automatically padded, this value is encoded in the fabric's
-	 * data_length response PDU containing the SCSI defined sense data.
-	 */
-	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER;
 
-after_reason:
 	trace_target_cmd_complete(cmd);
 	return cmd->se_tfo->queue_status(cmd);
 }
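
A quick note on the hunk above: the long per-reason switch in transport_send_check_condition_and_sense() is folded into the sense_info_table lookup, with the TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE entry serving as the fallback for any reason whose slot has no sense key set. A minimal userspace sketch of that lookup-with-fallback shape (the enum, the two example entries and the scaffolding are illustrative, not the kernel's definitions):

#include <stdio.h>

enum reason { R_NO_SENSE, R_WRITE_PROTECTED, R_LUN_COMM_FAILURE, R_MAX };

struct sense_info { unsigned char key, asc, ascq; };

/* A key of 0 marks an unpopulated slot, mirroring the kernel table. */
static const struct sense_info table[R_MAX] = {
	[R_WRITE_PROTECTED]  = { .key = 0x07, .asc = 0x27 }, /* DATA PROTECT, WRITE PROTECTED */
	[R_LUN_COMM_FAILURE] = { .key = 0x02, .asc = 0x08 }, /* NOT READY, LU COMM FAILURE */
};

static const struct sense_info *lookup(int r)
{
	if (r < R_MAX && table[r].key)
		return &table[r];
	return &table[R_LUN_COMM_FAILURE];	/* default entry, as in the patch */
}

int main(void)
{
	const struct sense_info *si = lookup(R_NO_SENSE);	/* unpopulated, falls back */

	printf("key=%#x asc=%#x ascq=%#x\n", si->key, si->asc, si->ascq);
	return 0;
}
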
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c448ef4..937cebf 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -25,6 +25,7 @@
 #include <linux/parser.h>
 #include <linux/vmalloc.h>
 #include <linux/uio_driver.h>
+#include <linux/stringify.h>
 #include <net/genetlink.h>
 #include <scsi/scsi_common.h>
 #include <scsi/scsi_proto.h>
@@ -538,14 +539,8 @@
 		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
 		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
 			cmd->se_cmd);
-		transport_generic_request_failure(cmd->se_cmd,
-			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
-		cmd->se_cmd = NULL;
-		kmem_cache_free(tcmu_cmd_cache, cmd);
-		return;
-	}
-
-	if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
+		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
+	} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
 		memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
 			       se_cmd->scsi_sense_length);
 
@@ -577,7 +572,6 @@
 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
 {
 	struct tcmu_mailbox *mb;
-	LIST_HEAD(cpl_cmds);
 	unsigned long flags;
 	int handled = 0;
 
@@ -905,7 +899,7 @@
 	WARN_ON(!PAGE_ALIGNED(udev->data_off));
 	WARN_ON(udev->data_size % PAGE_SIZE);
 
-	info->version = xstr(TCMU_MAILBOX_VERSION);
+	info->version = __stringify(TCMU_MAILBOX_VERSION);
 
 	info->mem[0].name = "tcm-user command & data buffer";
 	info->mem[0].addr = (phys_addr_t) udev->mb_addr;
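
The target_core_user hunk swaps a local xstr() macro for __stringify() from <linux/stringify.h>, which stringizes only after its argument has been macro-expanded. A standalone sketch of that two-step expansion (the macro names and version value below are stand-ins for the demo; the kernel's helper is spelled __stringify):

#include <stdio.h>

#define stringify_1(x)	#x
#define stringify(x)	stringify_1(x)	/* expand first, then stringize */

#define MAILBOX_VERSION	2		/* made-up value for the demo */

int main(void)
{
	/* Prints "2" rather than "MAILBOX_VERSION" thanks to the indirection. */
	printf("%s\n", stringify(MAILBOX_VERSION));
	return 0;
}
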
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 4515f52..47fe94e 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -450,6 +450,8 @@
 	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
 	INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
 	INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
+	INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list);
+	spin_lock_init(&xcopy_pt_sess.sess_cmd_lock);
 
 	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
 	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
@@ -644,7 +646,7 @@
 	pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
 		(unsigned long long)src_lba, src_sectors, length);
 
-	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
+	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
 			      DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
 	xop->src_pt_cmd = xpt_cmd;
 
@@ -704,7 +706,7 @@
 	pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
 		(unsigned long long)dst_lba, dst_sectors, length);
 
-	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
+	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
 			      DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
 	xop->dst_pt_cmd = xpt_cmd;
 
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 6803172..aa3caca 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -255,7 +255,7 @@
 	struct ft_cmd *cmd = arg;
 	struct fc_frame_header *fh;
 
-	if (unlikely(IS_ERR(fp))) {
+	if (IS_ERR(fp)) {
 		/* XXX need to find cmd if queued */
 		cmd->seq = NULL;
 		cmd->aborted = true;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 118938e..5aabc4b 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -163,7 +163,7 @@
 
 config HISI_THERMAL
 	tristate "Hisilicon thermal driver"
-	depends on ARCH_HISI && CPU_THERMAL && OF
+	depends on (ARCH_HISI && CPU_THERMAL && OF) || COMPILE_TEST
 	help
 	  Enable this to plug hisilicon's thermal sensor driver into the Linux
 	  thermal framework. cpufreq is used as the cooling device to throttle
@@ -182,7 +182,7 @@
 
 config SPEAR_THERMAL
 	bool "SPEAr thermal sensor driver"
-	depends on PLAT_SPEAR
+	depends on PLAT_SPEAR || COMPILE_TEST
 	depends on OF
 	help
 	  Enable this to plug the SPEAr thermal sensor driver into the Linux
@@ -190,7 +190,7 @@
 
 config ROCKCHIP_THERMAL
 	tristate "Rockchip thermal driver"
-	depends on ARCH_ROCKCHIP
+	depends on ARCH_ROCKCHIP || COMPILE_TEST
 	depends on RESET_CONTROLLER
 	help
 	  Rockchip thermal driver provides support for Temperature sensor
@@ -208,7 +208,7 @@
 
 config KIRKWOOD_THERMAL
 	tristate "Temperature sensor on Marvell Kirkwood SoCs"
-	depends on MACH_KIRKWOOD
+	depends on MACH_KIRKWOOD || COMPILE_TEST
 	depends on OF
 	help
 	  Support for the Kirkwood thermal sensor driver into the Linux thermal
@@ -216,7 +216,7 @@
 
 config DOVE_THERMAL
 	tristate "Temperature sensor on Marvell Dove SoCs"
-	depends on ARCH_DOVE || MACH_DOVE
+	depends on ARCH_DOVE || MACH_DOVE || COMPILE_TEST
 	depends on OF
 	help
 	  Support for the Dove thermal sensor driver in the Linux thermal
@@ -234,7 +234,7 @@
 
 config ARMADA_THERMAL
 	tristate "Armada 370/XP thermal management"
-	depends on ARCH_MVEBU
+	depends on ARCH_MVEBU || COMPILE_TEST
 	depends on OF
 	help
 	  Enable this option if you want to have support for thermal management
@@ -340,12 +340,21 @@
 	tristate
 	depends on ACPI
 
+config INTEL_PCH_THERMAL
+	tristate "Intel PCH Thermal Reporting Driver"
+	depends on X86 && PCI
+	help
+	  Enable this to support thermal reporting on certain Intel PCHs.
+	  The thermal reporting device provides temperature readings,
+	  programmable trip points and other information.
+
 menu "Texas Instruments thermal drivers"
+depends on ARCH_HAS_BANDGAP || COMPILE_TEST
 source "drivers/thermal/ti-soc-thermal/Kconfig"
 endmenu
 
 menu "Samsung thermal drivers"
-depends on ARCH_EXYNOS
+depends on ARCH_EXYNOS || COMPILE_TEST
 source "drivers/thermal/samsung/Kconfig"
 endmenu
 
@@ -356,7 +365,7 @@
 
 config QCOM_SPMI_TEMP_ALARM
 	tristate "Qualcomm SPMI PMIC Temperature Alarm"
-	depends on OF && SPMI && IIO
+	depends on OF && (SPMI || COMPILE_TEST) && IIO
 	select REGMAP_SPMI
 	help
 	  This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP)
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 535dfee..26f1608 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -41,6 +41,7 @@
 obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL)	+= intel_quark_dts_thermal.o
 obj-$(CONFIG_TI_SOC_THERMAL)	+= ti-soc-thermal/
 obj-$(CONFIG_INT340X_THERMAL)  += int340x_thermal/
+obj-$(CONFIG_INTEL_PCH_THERMAL)	+= intel_pch_thermal.o
 obj-$(CONFIG_ST_THERMAL)	+= st/
 obj-$(CONFIG_TEGRA_SOCTHERM)	+= tegra_soctherm.o
 obj-$(CONFIG_HISI_THERMAL)     += hisi_thermal.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 01255fd..26b8d32 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -155,7 +155,7 @@
 }
 
 static int armada_get_temp(struct thermal_zone_device *thermal,
-			  unsigned long *temp)
+			  int *temp)
 {
 	struct armada_thermal_priv *priv = thermal->devdata;
 	unsigned long reg;
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 620dcd4..42c6f71 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -262,7 +262,9 @@
  * efficiently.  Power is stored in mW, frequency in KHz.  The
  * resulting table is in ascending order.
  *
- * Return: 0 on success, -E* on error.
+ * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
+ * -ENOMEM if we run out of memory or -EAGAIN if an OPP was
+ * added/enabled while the function was executing.
  */
 static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
 				 u32 capacitance)
@@ -273,8 +275,6 @@
 	int num_opps = 0, cpu, i, ret = 0;
 	unsigned long freq;
 
-	rcu_read_lock();
-
 	for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
 		dev = get_cpu_device(cpu);
 		if (!dev) {
@@ -284,24 +284,20 @@
 		}
 
 		num_opps = dev_pm_opp_get_opp_count(dev);
-		if (num_opps > 0) {
+		if (num_opps > 0)
 			break;
-		} else if (num_opps < 0) {
-			ret = num_opps;
-			goto unlock;
-		}
+		else if (num_opps < 0)
+			return num_opps;
 	}
 
-	if (num_opps == 0) {
-		ret = -EINVAL;
-		goto unlock;
-	}
+	if (num_opps == 0)
+		return -EINVAL;
 
 	power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
-	if (!power_table) {
-		ret = -ENOMEM;
-		goto unlock;
-	}
+	if (!power_table)
+		return -ENOMEM;
+
+	rcu_read_lock();
 
 	for (freq = 0, i = 0;
 	     opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
@@ -309,6 +305,12 @@
 		u32 freq_mhz, voltage_mv;
 		u64 power;
 
+		if (i >= num_opps) {
+			rcu_read_unlock();
+			ret = -EAGAIN;
+			goto free_power_table;
+		}
+
 		freq_mhz = freq / 1000000;
 		voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
 
@@ -326,17 +328,22 @@
 		power_table[i].power = power;
 	}
 
-	if (i == 0) {
+	rcu_read_unlock();
+
+	if (i != num_opps) {
 		ret = PTR_ERR(opp);
-		goto unlock;
+		goto free_power_table;
 	}
 
 	cpufreq_device->cpu_dev = dev;
 	cpufreq_device->dyn_power_table = power_table;
 	cpufreq_device->dyn_power_table_entries = i;
 
-unlock:
-	rcu_read_unlock();
+	return 0;
+
+free_power_table:
+	kfree(power_table);
+
 	return ret;
 }
 
@@ -847,7 +854,7 @@
 	ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
 	if (ret) {
 		cool_dev = ERR_PTR(ret);
-		goto free_table;
+		goto free_power_table;
 	}
 
 	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
@@ -889,6 +896,8 @@
 
 remove_idr:
 	release_idr(&cpufreq_idr, cpufreq_dev->id);
+free_power_table:
+	kfree(cpufreq_dev->dyn_power_table);
 free_table:
 	kfree(cpufreq_dev->freq_table);
 free_time_in_idle_timestamp:
@@ -1039,6 +1048,7 @@
 
 	thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
 	release_idr(&cpufreq_idr, cpufreq_dev->id);
+	kfree(cpufreq_dev->dyn_power_table);
 	kfree(cpufreq_dev->time_in_idle_timestamp);
 	kfree(cpufreq_dev->time_in_idle);
 	kfree(cpufreq_dev->freq_table);
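
The cpu_cooling hunks above move the power-table allocation out of the RCU read-side section (a GFP_KERNEL allocation may sleep, which is not allowed there) and make the fill loop return -EAGAIN when more OPPs turn up than were counted, as the updated kernel-doc states. A toy, single-threaded sketch of that count/allocate/re-check shape (names and numbers are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int src[8] = { 100, 200, 300 };
static int src_len = 3;

/* Fill a table sized for n entries; bail out with -EAGAIN if the source has
 * grown since it was counted (the driver detects this while walking OPPs). */
static int fill_table(int n, int **out)
{
	int *t = calloc(n, sizeof(*t));

	if (!t)
		return -ENOMEM;
	for (int i = 0; i < src_len; i++) {
		if (i >= n) {
			free(t);
			return -EAGAIN;
		}
		t[i] = src[i];
	}
	*out = t;
	return 0;
}

int main(void)
{
	int *t, n = src_len;		/* count first, allocate before "locking" */

	src[src_len++] = 400;		/* simulate a concurrently added OPP */
	if (fill_table(n, &t) == -EAGAIN)
		puts("source grew between counting and filling, retry (-EAGAIN)");
	return 0;
}
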
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 607b62c..e58bd0b 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -72,6 +72,7 @@
 	{ .compatible = "stericsson,db8500-cpufreq-cooling" },
 	{},
 };
+MODULE_DEVICE_TABLE(of, db8500_cpufreq_cooling_match);
 #endif
 
 static struct platform_driver db8500_cpufreq_cooling_driver = {
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 2fb273c..652acd8 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -107,8 +107,7 @@
 }
 
 /* Callback to get current temperature */
-static int db8500_sys_get_temp(struct thermal_zone_device *thermal,
-		unsigned long *temp)
+static int db8500_sys_get_temp(struct thermal_zone_device *thermal, int *temp)
 {
 	struct db8500_thermal_zone *pzone = thermal->devdata;
 
@@ -180,7 +179,7 @@
 
 /* Callback to get trip point temperature */
 static int db8500_sys_get_trip_temp(struct thermal_zone_device *thermal,
-		int trip, unsigned long *temp)
+		int trip, int *temp)
 {
 	struct db8500_thermal_zone *pzone = thermal->devdata;
 	struct db8500_thsens_platform_data *ptrips = pzone->trip_tab;
@@ -195,7 +194,7 @@
 
 /* Callback to get critical trip point temperature */
 static int db8500_sys_get_crit_temp(struct thermal_zone_device *thermal,
-		unsigned long *temp)
+		int *temp)
 {
 	struct db8500_thermal_zone *pzone = thermal->devdata;
 	struct db8500_thsens_platform_data *ptrips = pzone->trip_tab;
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index 09f6e30..a0bc9de 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -93,7 +93,7 @@
 }
 
 static int dove_get_temp(struct thermal_zone_device *thermal,
-			  unsigned long *temp)
+			  int *temp)
 {
 	unsigned long reg;
 	struct dove_thermal_priv *priv = thermal->devdata;
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index c2c10bb..34fe365 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -34,7 +34,7 @@
 static int get_trip_level(struct thermal_zone_device *tz)
 {
 	int count = 0;
-	unsigned long trip_temp;
+	int trip_temp;
 	enum thermal_trip_type trip_type;
 
 	if (tz->trips == 0 || !tz->ops->get_trip_temp)
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index c5dd76b..70836c5 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -25,14 +25,13 @@
 
 static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
 {
-	long trip_temp;
-	unsigned long trip_hyst;
+	int trip_temp, trip_hyst;
 	struct thermal_instance *instance;
 
 	tz->ops->get_trip_temp(tz, trip, &trip_temp);
 	tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
 
-	dev_dbg(&tz->device, "Trip%d[temp=%ld]:temp=%d:hyst=%ld\n",
+	dev_dbg(&tz->device, "Trip%d[temp=%d]:temp=%d:hyst=%d\n",
 				trip, trip_temp, tz->temperature,
 				trip_hyst);
 
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index b49f97c..36d0729 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -155,7 +155,7 @@
 	mutex_unlock(&data->thermal_lock);
 }
 
-static int hisi_thermal_get_temp(void *_sensor, long *temp)
+static int hisi_thermal_get_temp(void *_sensor, int *temp)
 {
 	struct hisi_thermal_sensor *sensor = _sensor;
 	struct hisi_thermal_data *data = sensor->thermal;
@@ -178,7 +178,7 @@
 	data->irq_bind_sensor = sensor_id;
 	mutex_unlock(&data->thermal_lock);
 
-	dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%ld, thres=%d\n",
+	dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%d, thres=%d\n",
 		sensor->id, data->irq_enabled, *temp, sensor->thres_temp);
 	/*
 	 * Bind irq to sensor for two cases:
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index fde4c28..4bec1d3 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -98,10 +98,10 @@
 	enum thermal_device_mode mode;
 	struct regmap *tempmon;
 	u32 c1, c2; /* See formula in imx_get_sensor_data() */
-	unsigned long temp_passive;
-	unsigned long temp_critical;
-	unsigned long alarm_temp;
-	unsigned long last_temp;
+	int temp_passive;
+	int temp_critical;
+	int alarm_temp;
+	int last_temp;
 	bool irq_enabled;
 	int irq;
 	struct clk *thermal_clk;
@@ -109,7 +109,7 @@
 };
 
 static void imx_set_panic_temp(struct imx_thermal_data *data,
-			       signed long panic_temp)
+			       int panic_temp)
 {
 	struct regmap *map = data->tempmon;
 	int critical_value;
@@ -121,7 +121,7 @@
 }
 
 static void imx_set_alarm_temp(struct imx_thermal_data *data,
-			       signed long alarm_temp)
+			       int alarm_temp)
 {
 	struct regmap *map = data->tempmon;
 	int alarm_value;
@@ -133,7 +133,7 @@
 			TEMPSENSE0_ALARM_VALUE_SHIFT);
 }
 
-static int imx_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
+static int imx_get_temp(struct thermal_zone_device *tz, int *temp)
 {
 	struct imx_thermal_data *data = tz->devdata;
 	struct regmap *map = data->tempmon;
@@ -189,13 +189,13 @@
 		if (data->alarm_temp == data->temp_critical &&
 			*temp < data->temp_passive) {
 			imx_set_alarm_temp(data, data->temp_passive);
-			dev_dbg(&tz->device, "thermal alarm off: T < %lu\n",
+			dev_dbg(&tz->device, "thermal alarm off: T < %d\n",
 				data->alarm_temp / 1000);
 		}
 	}
 
 	if (*temp != data->last_temp) {
-		dev_dbg(&tz->device, "millicelsius: %ld\n", *temp);
+		dev_dbg(&tz->device, "millicelsius: %d\n", *temp);
 		data->last_temp = *temp;
 	}
 
@@ -262,8 +262,7 @@
 	return 0;
 }
 
-static int imx_get_crit_temp(struct thermal_zone_device *tz,
-			     unsigned long *temp)
+static int imx_get_crit_temp(struct thermal_zone_device *tz, int *temp)
 {
 	struct imx_thermal_data *data = tz->devdata;
 
@@ -272,7 +271,7 @@
 }
 
 static int imx_get_trip_temp(struct thermal_zone_device *tz, int trip,
-			     unsigned long *temp)
+			     int *temp)
 {
 	struct imx_thermal_data *data = tz->devdata;
 
@@ -282,7 +281,7 @@
 }
 
 static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip,
-			     unsigned long temp)
+			     int temp)
 {
 	struct imx_thermal_data *data = tz->devdata;
 
@@ -434,7 +433,7 @@
 {
 	struct imx_thermal_data *data = dev;
 
-	dev_dbg(&data->tz->device, "THERMAL ALARM: T > %lu\n",
+	dev_dbg(&data->tz->device, "THERMAL ALARM: T > %d\n",
 		data->alarm_temp / 1000);
 
 	thermal_zone_device_update(data->tz);
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index 031018e..5836e55 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -186,7 +186,7 @@
 }
 
 static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
-			unsigned long *temp)
+			int *temp)
 {
 	*temp = 20 * 1000; /* faked temp sensor with 20C */
 	return 0;
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
index 1e25133..b9b2666 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
@@ -20,7 +20,7 @@
 #include "int340x_thermal_zone.h"
 
 static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
-					 unsigned long *temp)
+					 int *temp)
 {
 	struct int34x_thermal_zone *d = zone->devdata;
 	unsigned long long tmp;
@@ -49,7 +49,7 @@
 }
 
 static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
-					 int trip, unsigned long *temp)
+					 int trip, int *temp)
 {
 	struct int34x_thermal_zone *d = zone->devdata;
 	int i;
@@ -114,7 +114,7 @@
 }
 
 static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
-				      int trip, unsigned long temp)
+				      int trip, int temp)
 {
 	struct int34x_thermal_zone *d = zone->devdata;
 	acpi_status status;
@@ -136,7 +136,7 @@
 
 
 static int int340x_thermal_get_trip_hyst(struct thermal_zone_device *zone,
-		int trip, unsigned long *temp)
+		int trip, int *temp)
 {
 	struct int34x_thermal_zone *d = zone->devdata;
 	acpi_status status;
@@ -163,7 +163,7 @@
 };
 
 static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
-				      unsigned long *temp)
+				      int *temp)
 {
 	unsigned long long r;
 	acpi_status status;
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
index 9f38ab7..aaadf72 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
@@ -21,7 +21,7 @@
 #define INT340X_THERMAL_MAX_ACT_TRIP_COUNT	10
 
 struct active_trip {
-	unsigned long temp;
+	int temp;
 	int id;
 	bool valid;
 };
@@ -31,11 +31,11 @@
 	struct active_trip act_trips[INT340X_THERMAL_MAX_ACT_TRIP_COUNT];
 	unsigned long *aux_trips;
 	int aux_trip_nr;
-	unsigned long psv_temp;
+	int psv_temp;
 	int psv_trip_id;
-	unsigned long crt_temp;
+	int crt_temp;
 	int crt_trip_id;
-	unsigned long hot_temp;
+	int hot_temp;
 	int hot_trip_id;
 	struct thermal_zone_device *zone;
 	struct thermal_zone_device_ops *override_ops;
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 3df3dc3..ccc0ad0 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -145,7 +145,7 @@
 	return -EINVAL;
 }
 
-static int read_temp_msr(unsigned long *temp)
+static int read_temp_msr(int *temp)
 {
 	int cpu;
 	u32 eax, edx;
@@ -177,7 +177,7 @@
 }
 
 static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
-					 unsigned long *temp)
+					 int *temp)
 {
 	int ret;
 
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
new file mode 100644
index 0000000..50c7da7
--- /dev/null
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -0,0 +1,283 @@
+/* intel_pch_thermal.c - Intel PCH Thermal driver
+ *
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * Authors:
+ *     Tushar Dave <tushar.n.dave@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/thermal.h>
+
+/* Intel PCH thermal Device IDs */
+#define PCH_THERMAL_DID_WPT	0x9CA4 /* Wildcat Point */
+
+/* Wildcat Point-LP  PCH Thermal registers */
+#define WPT_TEMP	0x0000	/* Temperature */
+#define WPT_TSC	0x04	/* Thermal Sensor Control */
+#define WPT_TSS	0x06	/* Thermal Sensor Status */
+#define WPT_TSEL	0x08	/* Thermal Sensor Enable and Lock */
+#define WPT_TSREL	0x0A	/* Thermal Sensor Report Enable and Lock */
+#define WPT_TSMIC	0x0C	/* Thermal Sensor SMI Control */
+#define WPT_CTT	0x0010	/* Catastrophic Trip Point */
+#define WPT_TAHV	0x0014	/* Thermal Alert High Value */
+#define WPT_TALV	0x0018	/* Thermal Alert Low Value */
+#define WPT_TL		0x00000040	/* Throttle Value */
+#define WPT_PHL	0x0060	/* PCH Hot Level */
+#define WPT_PHLC	0x62	/* PHL Control */
+#define WPT_TAS	0x80	/* Thermal Alert Status */
+#define WPT_TSPIEN	0x82	/* PCI Interrupt Event Enables */
+#define WPT_TSGPEN	0x84	/* General Purpose Event Enables */
+
+/*  Wildcat Point-LP  PCH Thermal Register bit definitions */
+#define WPT_TEMP_TSR	0x00ff	/* Temp TS Reading */
+#define WPT_TSC_CPDE	0x01	/* Catastrophic Power-Down Enable */
+#define WPT_TSS_TSDSS	0x10	/* Thermal Sensor Dynamic Shutdown Status */
+#define WPT_TSS_GPES	0x08	/* GPE status */
+#define WPT_TSEL_ETS	0x01    /* Enable TS */
+#define WPT_TSEL_PLDB	0x80	/* TSEL Policy Lock-Down Bit */
+#define WPT_TL_TOL	0x000001FF	/* T0 Level */
+#define WPT_TL_T1L	0x1ff00000	/* T1 Level */
+#define WPT_TL_TTEN	0x20000000	/* TT Enable */
+
+static char driver_name[] = "Intel PCH thermal driver";
+
+struct pch_thermal_device {
+	void __iomem *hw_base;
+	const struct pch_dev_ops *ops;
+	struct pci_dev *pdev;
+	struct thermal_zone_device *tzd;
+	int crt_trip_id;
+	unsigned long crt_temp;
+	int hot_trip_id;
+	unsigned long hot_temp;
+};
+
+static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
+{
+	u8 tsel;
+	u16 trip_temp;
+
+	*nr_trips = 0;
+
+	/* Check if BIOS has already enabled thermal sensor */
+	if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS))
+		goto read_trips;
+
+	tsel = readb(ptd->hw_base + WPT_TSEL);
+	/*
+	 * When TSEL's Policy Lock-Down bit is 1, TSEL becomes read-only.
+	 * If so, the thermal sensor cannot be enabled. Bail out.
+	 */
+	if (tsel & WPT_TSEL_PLDB) {
+		dev_err(&ptd->pdev->dev, "Sensor can't be enabled\n");
+		return -ENODEV;
+	}
+
+	writeb(tsel|WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL);
+	if (!(WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS))) {
+		dev_err(&ptd->pdev->dev, "Sensor can't be enabled\n");
+		return -ENODEV;
+	}
+
+read_trips:
+	ptd->crt_trip_id = -1;
+	trip_temp = readw(ptd->hw_base + WPT_CTT);
+	trip_temp &= 0x1FF;
+	if (trip_temp) {
+		/* Resolution of 1/2 degree C and an offset of -50C */
+		ptd->crt_temp = trip_temp * 1000 / 2 - 50000;
+		ptd->crt_trip_id = 0;
+		++(*nr_trips);
+	}
+
+	ptd->hot_trip_id = -1;
+	trip_temp = readw(ptd->hw_base + WPT_PHL);
+	trip_temp &= 0x1FF;
+	if (trip_temp) {
+		/* Resolution of 1/2 degree C and an offset of -50C */
+		ptd->hot_temp = trip_temp * 1000 / 2 - 50000;
+		ptd->hot_trip_id = *nr_trips;
+		++(*nr_trips);
+	}
+
+	return 0;
+}
+
+static int pch_wpt_get_temp(struct pch_thermal_device *ptd, int *temp)
+{
+	u8 wpt_temp;
+
+	wpt_temp = WPT_TEMP_TSR & readl(ptd->hw_base + WPT_TEMP);
+
+	/* Resolution of 1/2 degree C and an offset of -50C */
+	*temp = (wpt_temp * 1000 / 2 - 50000);
+
+	return 0;
+}
+
+struct pch_dev_ops {
+	int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips);
+	int (*get_temp)(struct pch_thermal_device *ptd, int *temp);
+};
+
+
+/* dev ops for Wildcat Point */
+static struct pch_dev_ops pch_dev_ops_wpt = {
+	.hw_init = pch_wpt_init,
+	.get_temp = pch_wpt_get_temp,
+};
+
+static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp)
+{
+	struct pch_thermal_device *ptd = tzd->devdata;
+
+	return	ptd->ops->get_temp(ptd, temp);
+}
+
+static int pch_get_trip_type(struct thermal_zone_device *tzd, int trip,
+			     enum thermal_trip_type *type)
+{
+	struct pch_thermal_device *ptd = tzd->devdata;
+
+	if (ptd->crt_trip_id == trip)
+		*type = THERMAL_TRIP_CRITICAL;
+	else if (ptd->hot_trip_id == trip)
+		*type = THERMAL_TRIP_HOT;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int pch_get_trip_temp(struct thermal_zone_device *tzd, int trip, int *temp)
+{
+	struct pch_thermal_device *ptd = tzd->devdata;
+
+	if (ptd->crt_trip_id == trip)
+		*temp = ptd->crt_temp;
+	else if (ptd->hot_trip_id == trip)
+		*temp = ptd->hot_temp;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static struct thermal_zone_device_ops tzd_ops = {
+	.get_temp = pch_thermal_get_temp,
+	.get_trip_type = pch_get_trip_type,
+	.get_trip_temp = pch_get_trip_temp,
+};
+
+
+static int intel_pch_thermal_probe(struct pci_dev *pdev,
+				   const struct pci_device_id *id)
+{
+	struct pch_thermal_device *ptd;
+	int err;
+	int nr_trips;
+	char *dev_name;
+
+	ptd = devm_kzalloc(&pdev->dev, sizeof(*ptd), GFP_KERNEL);
+	if (!ptd)
+		return -ENOMEM;
+
+	switch (pdev->device) {
+	case PCH_THERMAL_DID_WPT:
+		ptd->ops = &pch_dev_ops_wpt;
+		dev_name = "pch_wildcat_point";
+		break;
+	default:
+		dev_err(&pdev->dev, "unknown pch thermal device\n");
+		return -ENODEV;
+	}
+
+	pci_set_drvdata(pdev, ptd);
+	ptd->pdev = pdev;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable pci device\n");
+		return err;
+	}
+
+	err = pci_request_regions(pdev, driver_name);
+	if (err) {
+		dev_err(&pdev->dev, "failed to request pci region\n");
+		goto error_disable;
+	}
+
+	ptd->hw_base = pci_ioremap_bar(pdev, 0);
+	if (!ptd->hw_base) {
+		err = -ENOMEM;
+		dev_err(&pdev->dev, "failed to map mem base\n");
+		goto error_release;
+	}
+
+	err = ptd->ops->hw_init(ptd, &nr_trips);
+	if (err)
+		goto error_cleanup;
+
+	ptd->tzd = thermal_zone_device_register(dev_name, nr_trips, 0, ptd,
+						&tzd_ops, NULL, 0, 0);
+	if (IS_ERR(ptd->tzd)) {
+		dev_err(&pdev->dev, "Failed to register thermal zone %s\n",
+			dev_name);
+		err = PTR_ERR(ptd->tzd);
+		goto error_cleanup;
+	}
+
+	return 0;
+
+error_cleanup:
+	iounmap(ptd->hw_base);
+error_release:
+	pci_release_regions(pdev);
+error_disable:
+	pci_disable_device(pdev);
+	dev_err(&pdev->dev, "pci device failed to probe\n");
+	return err;
+}
+
+static void intel_pch_thermal_remove(struct pci_dev *pdev)
+{
+	struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+
+	thermal_zone_device_unregister(ptd->tzd);
+	iounmap(ptd->hw_base);
+	pci_set_drvdata(pdev, NULL);
+	pci_release_region(pdev, 0);
+	pci_disable_device(pdev);
+}
+
+static struct pci_device_id intel_pch_thermal_id[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
+	{ 0, },
+};
+MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
+
+static struct pci_driver intel_pch_thermal_driver = {
+	.name		= "intel_pch_thermal",
+	.id_table	= intel_pch_thermal_id,
+	.probe		= intel_pch_thermal_probe,
+	.remove		= intel_pch_thermal_remove,
+};
+
+module_pci_driver(intel_pch_thermal_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel PCH Thermal driver");
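
As the comments in pch_wpt_get_temp() and pch_wpt_init() note, the sensor reports in 1/2 degree C steps with a -50C offset, and the driver converts to millidegrees Celsius. A standalone sketch with a made-up raw reading, just to make the arithmetic concrete:

#include <stdio.h>

/* Same conversion as pch_wpt_get_temp(): half-degree resolution, -50C offset,
 * result in millidegrees Celsius. */
static int wpt_raw_to_millicelsius(unsigned int raw)
{
	return raw * 1000 / 2 - 50000;
}

int main(void)
{
	/* 0x98 (152) is an invented reading: 152 * 500 - 50000 = 26000, i.e. 26C. */
	printf("%d\n", wpt_raw_to_millicelsius(0x98));
	return 0;
}
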
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 2ac0c70..6c79588 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -693,11 +693,14 @@
 	{ X86_VENDOR_INTEL, 6, 0x3f},
 	{ X86_VENDOR_INTEL, 6, 0x45},
 	{ X86_VENDOR_INTEL, 6, 0x46},
+	{ X86_VENDOR_INTEL, 6, 0x47},
 	{ X86_VENDOR_INTEL, 6, 0x4c},
 	{ X86_VENDOR_INTEL, 6, 0x4d},
+	{ X86_VENDOR_INTEL, 6, 0x4e},
 	{ X86_VENDOR_INTEL, 6, 0x4f},
 	{ X86_VENDOR_INTEL, 6, 0x56},
 	{ X86_VENDOR_INTEL, 6, 0x57},
+	{ X86_VENDOR_INTEL, 6, 0x5e},
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/intel_quark_dts_thermal.c b/drivers/thermal/intel_quark_dts_thermal.c
index 4434ec8..5ed90e6 100644
--- a/drivers/thermal/intel_quark_dts_thermal.c
+++ b/drivers/thermal/intel_quark_dts_thermal.c
@@ -186,7 +186,7 @@
 	return ret;
 }
 
-static int _get_trip_temp(int trip, unsigned long *temp)
+static int _get_trip_temp(int trip, int *temp)
 {
 	int status;
 	u32 out;
@@ -212,19 +212,18 @@
 }
 
 static inline int sys_get_trip_temp(struct thermal_zone_device *tzd,
-				int trip, unsigned long *temp)
+				int trip, int *temp)
 {
 	return _get_trip_temp(trip, temp);
 }
 
-static inline int sys_get_crit_temp(struct thermal_zone_device *tzd,
-				unsigned long *temp)
+static inline int sys_get_crit_temp(struct thermal_zone_device *tzd, int *temp)
 {
 	return _get_trip_temp(QRK_DTS_ID_TP_CRITICAL, temp);
 }
 
 static int update_trip_temp(struct soc_sensor_entry *aux_entry,
-				int trip, unsigned long temp)
+				int trip, int temp)
 {
 	u32 out;
 	u32 temp_out;
@@ -272,7 +271,7 @@
 }
 
 static inline int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
-				unsigned long temp)
+				int temp)
 {
 	return update_trip_temp(tzd->devdata, trip, temp);
 }
@@ -289,7 +288,7 @@
 }
 
 static int sys_get_curr_temp(struct thermal_zone_device *tzd,
-				unsigned long *temp)
+				int *temp)
 {
 	u32 out;
 	int ret;
diff --git a/drivers/thermal/intel_soc_dts_iosf.c b/drivers/thermal/intel_soc_dts_iosf.c
index 42e4b6a..5841d1d 100644
--- a/drivers/thermal/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel_soc_dts_iosf.c
@@ -80,7 +80,7 @@
 }
 
 static int sys_get_trip_temp(struct thermal_zone_device *tzd, int trip,
-			     unsigned long *temp)
+			     int *temp)
 {
 	int status;
 	u32 out;
@@ -106,7 +106,7 @@
 }
 
 static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts,
-			    int thres_index, unsigned long temp,
+			    int thres_index, int temp,
 			    enum thermal_trip_type trip_type)
 {
 	int status;
@@ -196,7 +196,7 @@
 }
 
 static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
-			     unsigned long temp)
+			     int temp)
 {
 	struct intel_soc_dts_sensor_entry *dts = tzd->devdata;
 	struct intel_soc_dts_sensors *sensors = dts->sensors;
@@ -226,7 +226,7 @@
 }
 
 static int sys_get_curr_temp(struct thermal_zone_device *tzd,
-			     unsigned long *temp)
+			     int *temp)
 {
 	int status;
 	u32 out;
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
index 11041fe..8922366 100644
--- a/drivers/thermal/kirkwood_thermal.c
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -33,7 +33,7 @@
 };
 
 static int kirkwood_get_temp(struct thermal_zone_device *thermal,
-			  unsigned long *temp)
+			  int *temp)
 {
 	unsigned long reg;
 	struct kirkwood_thermal_priv *priv = thermal->devdata;
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index b295b2b..42b7d42 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -91,7 +91,7 @@
 /***   DT thermal zone device callbacks   ***/
 
 static int of_thermal_get_temp(struct thermal_zone_device *tz,
-			       unsigned long *temp)
+			       int *temp)
 {
 	struct __thermal_zone *data = tz->devdata;
 
@@ -177,7 +177,7 @@
  * Return: zero on success, error code otherwise
  */
 static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
-				    unsigned long temp)
+				    int temp)
 {
 	struct __thermal_zone *data = tz->devdata;
 
@@ -311,7 +311,7 @@
 }
 
 static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip,
-				    unsigned long *temp)
+				    int *temp)
 {
 	struct __thermal_zone *data = tz->devdata;
 
@@ -324,7 +324,7 @@
 }
 
 static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
-				    unsigned long temp)
+				    int temp)
 {
 	struct __thermal_zone *data = tz->devdata;
 
@@ -338,7 +338,7 @@
 }
 
 static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip,
-				    unsigned long *hyst)
+				    int *hyst)
 {
 	struct __thermal_zone *data = tz->devdata;
 
@@ -351,7 +351,7 @@
 }
 
 static int of_thermal_set_trip_hyst(struct thermal_zone_device *tz, int trip,
-				    unsigned long hyst)
+				    int hyst)
 {
 	struct __thermal_zone *data = tz->devdata;
 
@@ -365,7 +365,7 @@
 }
 
 static int of_thermal_get_crit_temp(struct thermal_zone_device *tz,
-				    unsigned long *temp)
+				    int *temp)
 {
 	struct __thermal_zone *data = tz->devdata;
 	int i;
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 2516769..e570ff0 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -24,6 +24,8 @@
 
 #include "thermal_core.h"
 
+#define INVALID_TRIP -1
+
 #define FRAC_BITS 10
 #define int_to_frac(x) ((x) << FRAC_BITS)
 #define frac_to_int(x) ((x) >> FRAC_BITS)
@@ -56,16 +58,21 @@
 
 /**
  * struct power_allocator_params - parameters for the power allocator governor
+ * @allocated_tzp:	whether we have allocated tzp for this thermal zone and
+ *			it needs to be freed on unbind
  * @err_integral:	accumulated error in the PID controller.
  * @prev_err:	error in the previous iteration of the PID controller.
  *		Used to calculate the derivative term.
  * @trip_switch_on:	first passive trip point of the thermal zone.  The
  *			governor switches on when this trip point is crossed.
+ *			If the thermal zone only has one passive trip point,
+ *			@trip_switch_on should be INVALID_TRIP.
  * @trip_max_desired_temperature:	last passive trip point of the thermal
  *					zone.  The temperature we are
  *					controlling for.
  */
 struct power_allocator_params {
+	bool allocated_tzp;
 	s64 err_integral;
 	s32 prev_err;
 	int trip_switch_on;
@@ -73,6 +80,98 @@
 };
 
 /**
+ * estimate_sustainable_power() - Estimate the sustainable power of a thermal zone
+ * @tz: thermal zone we are operating in
+ *
+ * For thermal zones that don't provide a sustainable_power in their
+ * thermal_zone_params, estimate one.  Calculate it using the minimum
+ * power of all the cooling devices as that gives a valid value that
+ * can give some degree of functionality.  For optimal performance of
+ * this governor, provide a sustainable_power in the thermal zone's
+ * thermal_zone_params.
+ */
+static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
+{
+	u32 sustainable_power = 0;
+	struct thermal_instance *instance;
+	struct power_allocator_params *params = tz->governor_data;
+
+	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+		struct thermal_cooling_device *cdev = instance->cdev;
+		u32 min_power;
+
+		if (instance->trip != params->trip_max_desired_temperature)
+			continue;
+
+		if (power_actor_get_min_power(cdev, tz, &min_power))
+			continue;
+
+		sustainable_power += min_power;
+	}
+
+	return sustainable_power;
+}
+
+/**
+ * estimate_pid_constants() - Estimate the constants for the PID controller
+ * @tz:		thermal zone for which to estimate the constants
+ * @sustainable_power:	sustainable power for the thermal zone
+ * @trip_switch_on:	trip point number for the switch on temperature
+ * @control_temp:	target temperature for the power allocator governor
+ * @force:	whether to force the update of the constants
+ *
+ * This function is used to update the estimation of the PID
+ * controller constants in struct thermal_zone_parameters.
+ * Sustainable power is provided in case it was estimated.  The
+ * estimated sustainable_power should not be stored in the
+ * thermal_zone_parameters so it has to be passed explicitly to this
+ * function.
+ *
+ * If @force is not set, the values in the thermal zone's parameters
+ * are preserved if they are not zero.  If @force is set, the values
+ * in thermal zone's parameters are overwritten.
+ */
+static void estimate_pid_constants(struct thermal_zone_device *tz,
+				   u32 sustainable_power, int trip_switch_on,
+				   int control_temp, bool force)
+{
+	int ret;
+	int switch_on_temp;
+	u32 temperature_threshold;
+
+	ret = tz->ops->get_trip_temp(tz, trip_switch_on, &switch_on_temp);
+	if (ret)
+		switch_on_temp = 0;
+
+	temperature_threshold = control_temp - switch_on_temp;
+	/*
+	 * estimate_pid_constants() tries to find appropriate default
+	 * values for thermal zones that don't provide them. If a
+	 * system integrator has configured a thermal zone with two
+	 * passive trip points at the same temperature, that person
+	 * hasn't put any effort into setting up the thermal zone
+	 * properly, so just give up.
+	 */
+	if (!temperature_threshold)
+		return;
+
+	if (!tz->tzp->k_po || force)
+		tz->tzp->k_po = int_to_frac(sustainable_power) /
+			temperature_threshold;
+
+	if (!tz->tzp->k_pu || force)
+		tz->tzp->k_pu = int_to_frac(2 * sustainable_power) /
+			temperature_threshold;
+
+	if (!tz->tzp->k_i || force)
+		tz->tzp->k_i = int_to_frac(10) / 1000;
+	/*
+	 * The default for k_d and integral_cutoff is 0, so we can
+	 * leave them as they are.
+	 */
+}
+
+/**
  * pid_controller() - PID controller
  * @tz:	thermal zone we are operating in
  * @current_temp:	the current temperature in millicelsius
@@ -92,17 +191,27 @@
  * Return: The power budget for the next period.
  */
 static u32 pid_controller(struct thermal_zone_device *tz,
-			  unsigned long current_temp,
-			  unsigned long control_temp,
+			  int current_temp,
+			  int control_temp,
 			  u32 max_allocatable_power)
 {
 	s64 p, i, d, power_range;
 	s32 err, max_power_frac;
+	u32 sustainable_power;
 	struct power_allocator_params *params = tz->governor_data;
 
 	max_power_frac = int_to_frac(max_allocatable_power);
 
-	err = ((s32)control_temp - (s32)current_temp);
+	if (tz->tzp->sustainable_power) {
+		sustainable_power = tz->tzp->sustainable_power;
+	} else {
+		sustainable_power = estimate_sustainable_power(tz);
+		estimate_pid_constants(tz, sustainable_power,
+				       params->trip_switch_on, control_temp,
+				       true);
+	}
+
+	err = control_temp - current_temp;
 	err = int_to_frac(err);
 
 	/* Calculate the proportional term */
@@ -139,7 +248,7 @@
 	power_range = p + i + d;
 
 	/* feed-forward the known sustainable dissipatable power */
-	power_range = tz->tzp->sustainable_power + frac_to_int(power_range);
+	power_range = sustainable_power + frac_to_int(power_range);
 
 	power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power);
 
@@ -223,8 +332,8 @@
 }
 
 static int allocate_power(struct thermal_zone_device *tz,
-			  unsigned long current_temp,
-			  unsigned long control_temp)
+			  int current_temp,
+			  int control_temp)
 {
 	struct thermal_instance *instance;
 	struct power_allocator_params *params = tz->governor_data;
@@ -247,6 +356,11 @@
 		}
 	}
 
+	if (!num_actors) {
+		ret = -ENODEV;
+		goto unlock;
+	}
+
 	/*
 	 * We need to allocate five arrays of the same size:
 	 * req_power, max_power, granted_power, extra_actor_power and
@@ -331,7 +445,7 @@
 				      granted_power, total_granted_power,
 				      num_actors, power_range,
 				      max_allocatable_power, current_temp,
-				      (s32)control_temp - (s32)current_temp);
+				      control_temp - current_temp);
 
 	kfree(req_power);
 unlock:
@@ -340,43 +454,66 @@
 	return ret;
 }
 
-static int get_governor_trips(struct thermal_zone_device *tz,
-			      struct power_allocator_params *params)
+/**
+ * get_governor_trips() - get the number of the two trip points that are key for this governor
+ * @tz:	thermal zone to operate on
+ * @params:	pointer to private data for this governor
+ *
+ * The power allocator governor works optimally with two trip points:
+ * a "switch on" trip point and a "maximum desired temperature".  These
+ * are defined as the first and last passive trip points.
+ *
+ * If there is only one trip point, then that's considered to be the
+ * "maximum desired temperature" trip point and the governor is always
+ * on.  If there are no passive or active trip points, then the
+ * governor won't do anything.  In fact, its throttle function
+ * won't be called at all.
+ */
+static void get_governor_trips(struct thermal_zone_device *tz,
+			       struct power_allocator_params *params)
 {
-	int i, ret, last_passive;
+	int i, last_active, last_passive;
 	bool found_first_passive;
 
 	found_first_passive = false;
-	last_passive = -1;
-	ret = -EINVAL;
+	last_active = INVALID_TRIP;
+	last_passive = INVALID_TRIP;
 
 	for (i = 0; i < tz->trips; i++) {
 		enum thermal_trip_type type;
+		int ret;
 
 		ret = tz->ops->get_trip_type(tz, i, &type);
-		if (ret)
-			return ret;
+		if (ret) {
+			dev_warn(&tz->device,
+				 "Failed to get trip point %d type: %d\n", i,
+				 ret);
+			continue;
+		}
 
-		if (!found_first_passive) {
-			if (type == THERMAL_TRIP_PASSIVE) {
+		if (type == THERMAL_TRIP_PASSIVE) {
+			if (!found_first_passive) {
 				params->trip_switch_on = i;
 				found_first_passive = true;
+			} else {
+				last_passive = i;
 			}
-		} else if (type == THERMAL_TRIP_PASSIVE) {
-			last_passive = i;
+		} else if (type == THERMAL_TRIP_ACTIVE) {
+			last_active = i;
 		} else {
 			break;
 		}
 	}
 
-	if (last_passive != -1) {
+	if (last_passive != INVALID_TRIP) {
 		params->trip_max_desired_temperature = last_passive;
-		ret = 0;
+	} else if (found_first_passive) {
+		params->trip_max_desired_temperature = params->trip_switch_on;
+		params->trip_switch_on = INVALID_TRIP;
 	} else {
-		ret = -EINVAL;
+		params->trip_switch_on = INVALID_TRIP;
+		params->trip_max_desired_temperature = last_active;
 	}
-
-	return ret;
 }
 
 static void reset_pid_controller(struct power_allocator_params *params)
@@ -405,60 +542,45 @@
  * power_allocator_bind() - bind the power_allocator governor to a thermal zone
  * @tz:	thermal zone to bind it to
  *
- * Check that the thermal zone is valid for this governor, that is, it
- * has two thermal trips.  If so, initialize the PID controller
- * parameters and bind it to the thermal zone.
+ * Initialize the PID controller parameters and bind it to the thermal
+ * zone.
  *
- * Return: 0 on success, -EINVAL if the trips were invalid or -ENOMEM
- * if we ran out of memory.
+ * Return: 0 on success, or -ENOMEM if we ran out of memory.
  */
 static int power_allocator_bind(struct thermal_zone_device *tz)
 {
 	int ret;
 	struct power_allocator_params *params;
-	unsigned long switch_on_temp, control_temp;
-	u32 temperature_threshold;
-
-	if (!tz->tzp || !tz->tzp->sustainable_power) {
-		dev_err(&tz->device,
-			"power_allocator: missing sustainable_power\n");
-		return -EINVAL;
-	}
+	int control_temp;
 
 	params = kzalloc(sizeof(*params), GFP_KERNEL);
 	if (!params)
 		return -ENOMEM;
 
-	ret = get_governor_trips(tz, params);
-	if (ret) {
-		dev_err(&tz->device,
-			"thermal zone %s has wrong trip setup for power allocator\n",
-			tz->type);
-		goto free;
+	if (!tz->tzp) {
+		tz->tzp = kzalloc(sizeof(*tz->tzp), GFP_KERNEL);
+		if (!tz->tzp) {
+			ret = -ENOMEM;
+			goto free_params;
+		}
+
+		params->allocated_tzp = true;
 	}
 
-	ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
-				     &switch_on_temp);
-	if (ret)
-		goto free;
+	if (!tz->tzp->sustainable_power)
+		dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n");
 
-	ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature,
-				     &control_temp);
-	if (ret)
-		goto free;
+	get_governor_trips(tz, params);
 
-	temperature_threshold = control_temp - switch_on_temp;
-
-	tz->tzp->k_po = tz->tzp->k_po ?:
-		int_to_frac(tz->tzp->sustainable_power) / temperature_threshold;
-	tz->tzp->k_pu = tz->tzp->k_pu ?:
-		int_to_frac(2 * tz->tzp->sustainable_power) /
-		temperature_threshold;
-	tz->tzp->k_i = tz->tzp->k_i ?: int_to_frac(10) / 1000;
-	/*
-	 * The default for k_d and integral_cutoff is 0, so we can
-	 * leave them as they are.
-	 */
+	if (tz->trips > 0) {
+		ret = tz->ops->get_trip_temp(tz,
+					params->trip_max_desired_temperature,
+					&control_temp);
+		if (!ret)
+			estimate_pid_constants(tz, tz->tzp->sustainable_power,
+					       params->trip_switch_on,
+					       control_temp, false);
+	}
 
 	reset_pid_controller(params);
 
@@ -466,14 +588,23 @@
 
 	return 0;
 
-free:
+free_params:
 	kfree(params);
+
 	return ret;
 }
 
 static void power_allocator_unbind(struct thermal_zone_device *tz)
 {
+	struct power_allocator_params *params = tz->governor_data;
+
 	dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
+
+	if (params->allocated_tzp) {
+		kfree(tz->tzp);
+		tz->tzp = NULL;
+	}
+
 	kfree(tz->governor_data);
 	tz->governor_data = NULL;
 }
@@ -481,7 +612,7 @@
 static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
 {
 	int ret;
-	unsigned long switch_on_temp, control_temp, current_temp;
+	int switch_on_temp, control_temp, current_temp;
 	struct power_allocator_params *params = tz->governor_data;
 
 	/*
@@ -499,13 +630,7 @@
 
 	ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
 				     &switch_on_temp);
-	if (ret) {
-		dev_warn(&tz->device,
-			 "Failed to get switch on temperature: %d\n", ret);
-		return ret;
-	}
-
-	if (current_temp < switch_on_temp) {
+	if (!ret && (current_temp < switch_on_temp)) {
 		tz->passive = 0;
 		reset_pid_controller(params);
 		allow_maximum_power(tz);
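
estimate_pid_constants() above derives default gains from the (possibly estimated) sustainable power and the gap between the switch-on and control trip temperatures, in the governor's FRAC_BITS fixed point. A standalone sketch with invented numbers to show the magnitudes this produces:

#include <stdio.h>

/* Fixed-point helpers as in power_allocator.c (FRAC_BITS = 10). */
#define FRAC_BITS	10
#define int_to_frac(x)	((x) << FRAC_BITS)

int main(void)
{
	/* Invented example: 3500 mW sustainable power, switch-on trip at 75C and
	 * control trip at 85C, both in millicelsius. */
	unsigned int sustainable_power = 3500;
	int switch_on_temp = 75000, control_temp = 85000;
	unsigned int threshold = control_temp - switch_on_temp;

	unsigned int k_po = int_to_frac(sustainable_power) / threshold;
	unsigned int k_pu = int_to_frac(2 * sustainable_power) / threshold;
	unsigned int k_i  = int_to_frac(10) / 1000;

	printf("k_po=%u k_pu=%u k_i=%u\n", k_po, k_pu, k_i);	/* 358 716 10 */
	return 0;
}
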
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index c8d27b8..b677aad 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -117,7 +117,7 @@
 	return 0;
 }
 
-static int qpnp_tm_get_temp(void *data, long *temp)
+static int qpnp_tm_get_temp(void *data, int *temp)
 {
 	struct qpnp_tm_chip *chip = data;
 	int ret, mili_celsius;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index fe4e767..5d4ae7d 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -200,8 +200,7 @@
 	return ret;
 }
 
-static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
-				 unsigned long *temp)
+static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
 {
 	struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
 
@@ -235,7 +234,7 @@
 }
 
 static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
-				      int trip, unsigned long *temp)
+				      int trip, int *temp)
 {
 	struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
 	struct device *dev = rcar_priv_to_dev(priv);
@@ -299,7 +298,7 @@
 static void rcar_thermal_work(struct work_struct *work)
 {
 	struct rcar_thermal_priv *priv;
-	unsigned long cctemp, nctemp;
+	int cctemp, nctemp;
 
 	priv = container_of(work, struct rcar_thermal_priv, work.work);
 
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index cd8f5f93..c89ffb2 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -64,7 +64,7 @@
 	void (*control)(void __iomem *reg, bool on);
 
 	/* Per-sensor methods */
-	int (*get_temp)(int chn, void __iomem *reg, long *temp);
+	int (*get_temp)(int chn, void __iomem *reg, int *temp);
 	void (*set_tshut_temp)(int chn, void __iomem *reg, long temp);
 	void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
 };
@@ -191,7 +191,7 @@
 	return 0;
 }
 
-static long rk_tsadcv2_code_to_temp(u32 code)
+static int rk_tsadcv2_code_to_temp(u32 code)
 {
 	unsigned int low = 0;
 	unsigned int high = ARRAY_SIZE(v2_code_table) - 1;
@@ -277,7 +277,7 @@
 	writel_relaxed(val, regs + TSADCV2_AUTO_CON);
 }
 
-static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, long *temp)
+static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, int *temp)
 {
 	u32 val;
 
@@ -366,7 +366,7 @@
 	return IRQ_HANDLED;
 }
 
-static int rockchip_thermal_get_temp(void *_sensor, long *out_temp)
+static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
 {
 	struct rockchip_thermal_sensor *sensor = _sensor;
 	struct rockchip_thermal_data *thermal = sensor->thermal;
@@ -374,7 +374,7 @@
 	int retval;
 
 	retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp);
-	dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %ld, retval: %d\n",
+	dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
 		sensor->id, *out_temp, retval);
 
 	return retval;
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index c96ff10..0bae8cc 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -207,8 +207,7 @@
 	int (*tmu_initialize)(struct platform_device *pdev);
 	void (*tmu_control)(struct platform_device *pdev, bool on);
 	int (*tmu_read)(struct exynos_tmu_data *data);
-	void (*tmu_set_emulation)(struct exynos_tmu_data *data,
-				  unsigned long temp);
+	void (*tmu_set_emulation)(struct exynos_tmu_data *data, int temp);
 	void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
 };
 
@@ -216,7 +215,7 @@
 {
 	char data[10], *envp[] = { data, NULL };
 	struct thermal_zone_device *tz = p->tzd;
-	unsigned long temp;
+	int temp;
 	unsigned int i;
 
 	if (!tz) {
@@ -517,7 +516,7 @@
 	struct thermal_zone_device *tz = data->tzd;
 	unsigned int status, trim_info;
 	unsigned int rising_threshold = 0, falling_threshold = 0;
-	unsigned long temp, temp_hist;
+	int temp, temp_hist;
 	int ret = 0, threshold_code, i, sensor_id, cal_type;
 
 	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
@@ -610,7 +609,7 @@
 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
 	unsigned int trim_info = 0, con, rising_threshold;
 	int ret = 0, threshold_code;
-	unsigned long crit_temp = 0;
+	int crit_temp = 0;
 
 	/*
 	 * For exynos5440 soc triminfo value is swapped between TMU0 and
@@ -663,7 +662,7 @@
 	unsigned int status, trim_info;
 	unsigned int rising_threshold = 0, falling_threshold = 0;
 	int ret = 0, threshold_code, i;
-	unsigned long temp, temp_hist;
+	int temp, temp_hist;
 	unsigned int reg_off, bit_off;
 
 	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
@@ -876,7 +875,7 @@
 	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
 }
 
-static int exynos_get_temp(void *p, long *temp)
+static int exynos_get_temp(void *p, int *temp)
 {
 	struct exynos_tmu_data *data = p;
 
@@ -896,7 +895,7 @@
 
 #ifdef CONFIG_THERMAL_EMULATION
 static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
-			    unsigned long temp)
+			    int temp)
 {
 	if (temp) {
 		temp /= MCELSIUS;
@@ -926,7 +925,7 @@
 }
 
 static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
-					 unsigned long temp)
+					 int temp)
 {
 	unsigned int val;
 	u32 emul_con;
@@ -946,7 +945,7 @@
 }
 
 static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
-					 unsigned long temp)
+					 int temp)
 {
 	unsigned int val;
 
@@ -955,7 +954,7 @@
 	writel(val, data->base + EXYNOS5440_TMU_S0_7_DEBUG);
 }
 
-static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+static int exynos_tmu_set_emulation(void *drv_data, int temp)
 {
 	struct exynos_tmu_data *data = drv_data;
 	int ret = -EINVAL;
@@ -978,7 +977,7 @@
 #else
 #define exynos4412_tmu_set_emulation NULL
 #define exynos5440_tmu_set_emulation NULL
-static int exynos_tmu_set_emulation(void *drv_data,	unsigned long temp)
+static int exynos_tmu_set_emulation(void *drv_data, int temp)
 	{ return -EINVAL; }
 #endif /* CONFIG_THERMAL_EMULATION */
 
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index bddb717..534dd91 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -38,7 +38,7 @@
 };
 
 static inline int thermal_get_temp(struct thermal_zone_device *thermal,
-				unsigned long *temp)
+				int *temp)
 {
 	struct spear_thermal_dev *stdev = thermal->devdata;
 
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
index 88c759d..be637e6 100644
--- a/drivers/thermal/st/st_thermal.c
+++ b/drivers/thermal/st/st_thermal.c
@@ -111,8 +111,7 @@
 }
 
 /* Callback to get temperature from HW*/
-static int st_thermal_get_temp(struct thermal_zone_device *th,
-		unsigned long *temperature)
+static int st_thermal_get_temp(struct thermal_zone_device *th, int *temperature)
 {
 	struct st_thermal_sensor *sensor = th->devdata;
 	struct device *dev = sensor->dev;
@@ -159,7 +158,7 @@
 }
 
 static int st_thermal_get_trip_temp(struct thermal_zone_device *th,
-				    int trip, unsigned long *temp)
+				    int trip, int *temp)
 {
 	struct st_thermal_sensor *sensor = th->devdata;
 	struct device *dev = sensor->dev;
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 5a0f12d..2f9f708 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -113,7 +113,7 @@
 
 static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
 {
-	long trip_temp;
+	int trip_temp;
 	enum thermal_trip_type trip_type;
 	enum thermal_trend trend;
 	struct thermal_instance *instance;
@@ -135,7 +135,7 @@
 		trace_thermal_zone_trip(tz, trip, trip_type);
 	}
 
-	dev_dbg(&tz->device, "Trip%d[type=%d,temp=%ld]:trend=%d,throttle=%d\n",
+	dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
 				trip, trip_type, trip_temp, trend, throttle);
 
 	mutex_lock(&tz->lock);
diff --git a/drivers/thermal/tegra_soctherm.c b/drivers/thermal/tegra_soctherm.c
index 9197fc0..74ea576 100644
--- a/drivers/thermal/tegra_soctherm.c
+++ b/drivers/thermal/tegra_soctherm.c
@@ -293,7 +293,7 @@
  * H denotes an addition of 0.5 Celsius and N denotes negation
  * of the final value.
  */
-static long translate_temp(u16 val)
+static int translate_temp(u16 val)
 {
 	long t;
 
@@ -306,7 +306,7 @@
 	return t;
 }
 
-static int tegra_thermctl_get_temp(void *data, long *out_temp)
+static int tegra_thermctl_get_temp(void *data, int *out_temp)
 {
 	struct tegra_thermctl_zone *zone = data;
 	u32 val;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 4ca211b..d9e525c 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -426,7 +426,7 @@
 static void handle_critical_trips(struct thermal_zone_device *tz,
 				int trip, enum thermal_trip_type trip_type)
 {
-	long trip_temp;
+	int trip_temp;
 
 	tz->ops->get_trip_temp(tz, trip, &trip_temp);
 
@@ -465,7 +465,7 @@
 }
 
 /**
- * thermal_zone_get_temp() - returns its the temperature of thermal zone
+ * thermal_zone_get_temp() - returns the temperature of a thermal zone
  * @tz: a valid pointer to a struct thermal_zone_device
  * @temp: a valid pointer to where to store the resulting temperature.
  *
@@ -474,14 +474,12 @@
  *
  * Return: On success returns 0, an error code otherwise
  */
-int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
+int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
 {
 	int ret = -EINVAL;
-#ifdef CONFIG_THERMAL_EMULATION
 	int count;
-	unsigned long crit_temp = -1UL;
+	int crit_temp = INT_MAX;
 	enum thermal_trip_type type;
-#endif
 
 	if (!tz || IS_ERR(tz) || !tz->ops->get_temp)
 		goto exit;
@@ -489,25 +487,26 @@
 	mutex_lock(&tz->lock);
 
 	ret = tz->ops->get_temp(tz, temp);
-#ifdef CONFIG_THERMAL_EMULATION
-	if (!tz->emul_temperature)
-		goto skip_emul;
 
-	for (count = 0; count < tz->trips; count++) {
-		ret = tz->ops->get_trip_type(tz, count, &type);
-		if (!ret && type == THERMAL_TRIP_CRITICAL) {
-			ret = tz->ops->get_trip_temp(tz, count, &crit_temp);
-			break;
+	if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) {
+		for (count = 0; count < tz->trips; count++) {
+			ret = tz->ops->get_trip_type(tz, count, &type);
+			if (!ret && type == THERMAL_TRIP_CRITICAL) {
+				ret = tz->ops->get_trip_temp(tz, count,
+						&crit_temp);
+				break;
+			}
 		}
+
+		/*
+		 * Only allow emulating a temperature when the real temperature
+		 * is below the critical temperature so that the emulation code
+		 * cannot hide critical conditions.
+		 */
+		if (!ret && *temp < crit_temp)
+			*temp = tz->emul_temperature;
 	}
-
-	if (ret)
-		goto skip_emul;
-
-	if (*temp < crit_temp)
-		*temp = tz->emul_temperature;
-skip_emul:
-#endif
+
 	mutex_unlock(&tz->lock);
 exit:
 	return ret;
@@ -516,8 +515,7 @@
 
 static void update_temperature(struct thermal_zone_device *tz)
 {
-	long temp;
-	int ret;
+	int temp, ret;
 
 	ret = thermal_zone_get_temp(tz, &temp);
 	if (ret) {
@@ -577,15 +575,14 @@
 temp_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct thermal_zone_device *tz = to_thermal_zone(dev);
-	long temperature;
-	int ret;
+	int temperature, ret;
 
 	ret = thermal_zone_get_temp(tz, &temperature);
 
 	if (ret)
 		return ret;
 
-	return sprintf(buf, "%ld\n", temperature);
+	return sprintf(buf, "%d\n", temperature);
 }
 
 static ssize_t
@@ -689,7 +686,7 @@
 {
 	struct thermal_zone_device *tz = to_thermal_zone(dev);
 	int trip, ret;
-	long temperature;
+	int temperature;
 
 	if (!tz->ops->get_trip_temp)
 		return -EPERM;
@@ -702,7 +699,7 @@
 	if (ret)
 		return ret;
 
-	return sprintf(buf, "%ld\n", temperature);
+	return sprintf(buf, "%d\n", temperature);
 }
 
 static ssize_t
@@ -711,7 +708,7 @@
 {
 	struct thermal_zone_device *tz = to_thermal_zone(dev);
 	int trip, ret;
-	unsigned long temperature;
+	int temperature;
 
 	if (!tz->ops->set_trip_hyst)
 		return -EPERM;
@@ -719,7 +716,7 @@
 	if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip))
 		return -EINVAL;
 
-	if (kstrtoul(buf, 10, &temperature))
+	if (kstrtoint(buf, 10, &temperature))
 		return -EINVAL;
 
 	/*
@@ -738,7 +735,7 @@
 {
 	struct thermal_zone_device *tz = to_thermal_zone(dev);
 	int trip, ret;
-	unsigned long temperature;
+	int temperature;
 
 	if (!tz->ops->get_trip_hyst)
 		return -EPERM;
@@ -748,7 +745,7 @@
 
 	ret = tz->ops->get_trip_hyst(tz, trip, &temperature);
 
-	return ret ? ret : sprintf(buf, "%ld\n", temperature);
+	return ret ? ret : sprintf(buf, "%d\n", temperature);
 }
 
 static ssize_t
@@ -847,7 +844,27 @@
 	return sprintf(buf, "%s\n", tz->governor->name);
 }
 
-#ifdef CONFIG_THERMAL_EMULATION
+static ssize_t
+available_policies_show(struct device *dev, struct device_attribute *devattr,
+			char *buf)
+{
+	struct thermal_governor *pos;
+	ssize_t count = 0;
+	ssize_t size = PAGE_SIZE;
+
+	mutex_lock(&thermal_governor_lock);
+
+	list_for_each_entry(pos, &thermal_governor_list, governor_list) {
+		size = PAGE_SIZE - count;
+		count += scnprintf(buf + count, size, "%s ", pos->name);
+	}
+	count += scnprintf(buf + count, size, "\n");
+
+	mutex_unlock(&thermal_governor_lock);
+
+	return count;
+}
+
 static ssize_t
 emul_temp_store(struct device *dev, struct device_attribute *attr,
 		     const char *buf, size_t count)
@@ -873,7 +890,6 @@
 	return ret ? ret : count;
 }
 static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
-#endif/*CONFIG_THERMAL_EMULATION*/
 
 static ssize_t
 sustainable_power_show(struct device *dev, struct device_attribute *devattr,
@@ -997,6 +1013,34 @@
 }
 
 /**
+ * power_actor_get_min_power() - get the minimum power that a cdev can consume
+ * @cdev:	pointer to &thermal_cooling_device
+ * @tz:		a valid thermal zone device pointer
+ * @min_power:	pointer in which to store the minimum power
+ *
+ * Calculate the minimum power consumption in milliwatts that the
+ * cooling device can currently consume and store it in @min_power.
+ *
+ * Return: 0 on success, -EINVAL if @cdev doesn't support the
+ * power_actor API or -E* on other error.
+ */
+int power_actor_get_min_power(struct thermal_cooling_device *cdev,
+			      struct thermal_zone_device *tz, u32 *min_power)
+{
+	unsigned long max_state;
+	int ret;
+
+	if (!cdev_is_power_actor(cdev))
+		return -EINVAL;
+
+	ret = cdev->ops->get_max_state(cdev, &max_state);
+	if (ret)
+		return ret;
+
+	return cdev->ops->state2power(cdev, tz, max_state, min_power);
+}
+
+/**
  * power_actor_set_power() - limit the maximum power that a cooling device can consume
  * @cdev:	pointer to &thermal_cooling_device
  * @instance:	thermal instance to update
@@ -1032,6 +1076,7 @@
 static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
 static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
 static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store);
+static DEVICE_ATTR(available_policies, S_IRUGO, available_policies_show, NULL);
 
 /* sys I/F for cooling device */
 #define to_cooling_device(_dev)	\
@@ -1803,11 +1848,12 @@
 			goto unregister;
 	}
 
-#ifdef CONFIG_THERMAL_EMULATION
-	result = device_create_file(&tz->device, &dev_attr_emul_temp);
-	if (result)
-		goto unregister;
-#endif
+	if (IS_ENABLED(CONFIG_THERMAL_EMULATION)) {
+		result = device_create_file(&tz->device, &dev_attr_emul_temp);
+		if (result)
+			goto unregister;
+	}
+
 	/* Create policy attribute */
 	result = device_create_file(&tz->device, &dev_attr_policy);
 	if (result)
@@ -1818,6 +1864,11 @@
 	if (result)
 		goto unregister;
 
+	/* Create available_policies attribute */
+	result = device_create_file(&tz->device, &dev_attr_available_policies);
+	if (result)
+		goto unregister;
+
 	/* Update 'this' zone's governor information */
 	mutex_lock(&thermal_governor_lock);
 
@@ -1849,9 +1900,6 @@
 
 	INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
 
-	if (!tz->ops->get_temp)
-		thermal_zone_device_set_polling(tz, 0);
-
 	thermal_zone_device_update(tz);
 
 	return tz;
@@ -1918,6 +1966,7 @@
 	if (tz->ops->get_mode)
 		device_remove_file(&tz->device, &dev_attr_mode);
 	device_remove_file(&tz->device, &dev_attr_policy);
+	device_remove_file(&tz->device, &dev_attr_available_policies);
 	remove_trip_attrs(tz);
 	thermal_set_governor(tz, NULL);
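The thermal_core.c changes above replace the CONFIG_THERMAL_EMULATION #ifdef blocks in thermal_zone_get_temp() and the emul_temp attribute registration with IS_ENABLED() checks, which keeps the code compiled and type-checked in both configurations while the optimizer drops the dead branch. A minimal sketch of the idiom; CONFIG_EXAMPLE_FEATURE and example_enable_feature() are assumptions, not real symbols:

	#include <linux/device.h>
	#include <linux/kconfig.h>

	int example_enable_feature(struct device *dev);	/* hypothetical helper */

	static int example_setup(struct device *dev)
	{
		/*
		 * The branch is always parsed and type-checked, and the compiler
		 * eliminates it when the option is off -- unlike #ifdef, which
		 * hides the code from the build entirely.
		 */
		if (IS_ENABLED(CONFIG_EXAMPLE_FEATURE)) {
			int ret = example_enable_feature(dev);

			if (ret)
				return ret;
		}

		return 0;
	}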
 
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 1967bee..06fd2ed9 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -69,7 +69,7 @@
 static ssize_t
 temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	long temperature;
+	int temperature;
 	int ret;
 	struct thermal_hwmon_attr *hwmon_attr
 			= container_of(attr, struct thermal_hwmon_attr, attr);
@@ -83,7 +83,7 @@
 	if (ret)
 		return ret;
 
-	return sprintf(buf, "%ld\n", temperature);
+	return sprintf(buf, "%d\n", temperature);
 }
 
 static ssize_t
@@ -95,14 +95,14 @@
 			= container_of(hwmon_attr, struct thermal_hwmon_temp,
 				       temp_crit);
 	struct thermal_zone_device *tz = temp->tz;
-	long temperature;
+	int temperature;
 	int ret;
 
 	ret = tz->ops->get_trip_temp(tz, 0, &temperature);
 	if (ret)
 		return ret;
 
-	return sprintf(buf, "%ld\n", temperature);
+	return sprintf(buf, "%d\n", temperature);
 }
 
 
@@ -142,7 +142,7 @@
 
 static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
 {
-	unsigned long temp;
+	int temp;
 	return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
 }
 
diff --git a/drivers/thermal/ti-soc-thermal/Kconfig b/drivers/thermal/ti-soc-thermal/Kconfig
index bd4c7be..cb6686f 100644
--- a/drivers/thermal/ti-soc-thermal/Kconfig
+++ b/drivers/thermal/ti-soc-thermal/Kconfig
@@ -1,7 +1,5 @@
 config TI_SOC_THERMAL
 	tristate "Texas Instruments SoCs temperature sensor driver"
-	depends on THERMAL
-	depends on ARCH_HAS_BANDGAP
 	help
 	  If you say yes here you get support for the Texas Instruments
 	  OMAP4460+ on die bandgap temperature sensor support. The register
@@ -24,7 +22,7 @@
 config OMAP4_THERMAL
 	bool "Texas Instruments OMAP4 thermal support"
 	depends on TI_SOC_THERMAL
-	depends on ARCH_OMAP4
+	depends on ARCH_OMAP4 || COMPILE_TEST
 	help
 	  If you say yes here you get thermal support for the Texas Instruments
 	  OMAP4 SoC family. The current chip supported are:
@@ -38,7 +36,7 @@
 config OMAP5_THERMAL
 	bool "Texas Instruments OMAP5 thermal support"
 	depends on TI_SOC_THERMAL
-	depends on SOC_OMAP5
+	depends on SOC_OMAP5 || COMPILE_TEST
 	help
 	  If you say yes here you get thermal support for the Texas Instruments
 	  OMAP5 SoC family. The current chip supported are:
@@ -50,7 +48,7 @@
 config DRA752_THERMAL
 	bool "Texas Instruments DRA752 thermal support"
 	depends on TI_SOC_THERMAL
-	depends on SOC_DRA7XX
+	depends on SOC_DRA7XX || COMPILE_TEST
 	help
 	  If you say yes here you get thermal support for the Texas Instruments
 	  DRA752 SoC family. The current chip supported are:
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index c7c5b37..b213a12 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -76,14 +76,14 @@
 
 /* thermal zone ops */
 /* Get temperature callback function for thermal zone */
-static inline int __ti_thermal_get_temp(void *devdata, long *temp)
+static inline int __ti_thermal_get_temp(void *devdata, int *temp)
 {
 	struct thermal_zone_device *pcb_tz = NULL;
 	struct ti_thermal_data *data = devdata;
 	struct ti_bandgap *bgp;
 	const struct ti_temp_sensor *s;
 	int ret, tmp, slope, constant;
-	unsigned long pcb_temp;
+	int pcb_temp;
 
 	if (!data)
 		return 0;
@@ -119,7 +119,7 @@
 }
 
 static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
-				      unsigned long *temp)
+				      int *temp)
 {
 	struct ti_thermal_data *data = thermal->devdata;
 
@@ -229,7 +229,7 @@
 
 /* Get trip temperature callback functions for thermal zone */
 static int ti_thermal_get_trip_temp(struct thermal_zone_device *thermal,
-				    int trip, unsigned long *temp)
+				    int trip, int *temp)
 {
 	if (!ti_thermal_is_valid_trip(trip))
 		return -EINVAL;
@@ -280,7 +280,7 @@
 
 /* Get critical temperature callback functions for thermal zone */
 static int ti_thermal_get_crit_temp(struct thermal_zone_device *thermal,
-				    unsigned long *temp)
+				    int *temp)
 {
 	/* shutdown zone */
 	return ti_thermal_get_trip_temp(thermal, OMAP_TRIP_NUMBER - 1, temp);
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 50d1d2c..7fc919f 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -164,7 +164,7 @@
 	return err;
 }
 
-static int sys_get_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
+static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
 {
 	u32 eax, edx;
 	struct phy_dev_entry *phy_dev_entry;
@@ -175,7 +175,7 @@
 	if (eax & 0x80000000) {
 		*temp = phy_dev_entry->tj_max -
 				((eax >> 16) & 0x7f) * 1000;
-		pr_debug("sys_get_curr_temp %ld\n", *temp);
+		pr_debug("sys_get_curr_temp %d\n", *temp);
 		return 0;
 	}
 
@@ -183,7 +183,7 @@
 }
 
 static int sys_get_trip_temp(struct thermal_zone_device *tzd,
-		int trip, unsigned long *temp)
+		int trip, int *temp)
 {
 	u32 eax, edx;
 	struct phy_dev_entry *phy_dev_entry;
@@ -214,13 +214,13 @@
 		*temp = phy_dev_entry->tj_max - thres_reg_value * 1000;
 	else
 		*temp = 0;
-	pr_debug("sys_get_trip_temp %ld\n", *temp);
+	pr_debug("sys_get_trip_temp %d\n", *temp);
 
 	return 0;
 }
 
 static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
-							unsigned long temp)
+							int temp)
 {
 	u32 l, h;
 	struct phy_dev_entry *phy_dev_entry;
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index c68fe12..20a41f7 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -643,7 +643,7 @@
 	{
 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
 		.vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
-		.subvendor = 0x2222, .subdevice = 0x1111,
+		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 	},
 	{ 0,}
 };
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index bb809cf..8b70a16 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -88,8 +88,8 @@
 };
 
 /* IUCV callback handler */
-static	int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
-static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
+static	int hvc_iucv_path_pending(struct iucv_path *, u8 *, u8 *);
+static void hvc_iucv_path_severed(struct iucv_path *, u8 *);
 static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
 static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
 
@@ -782,8 +782,8 @@
  *
  * Locking:	struct hvc_iucv_private->lock
  */
-static	int hvc_iucv_path_pending(struct iucv_path *path,
-				  u8 ipvmid[8], u8 ipuser[16])
+static	int hvc_iucv_path_pending(struct iucv_path *path, u8 *ipvmid,
+				  u8 *ipuser)
 {
 	struct hvc_iucv_private *priv, *tmp;
 	u8 wildcard[9] = "lnxhvc  ";
@@ -881,7 +881,7 @@
  *
  * Locking:	struct hvc_iucv_private->lock
  */
-static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
+static void hvc_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
 {
 	struct hvc_iucv_private *priv = path->private;
 
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 54e6c8d..b1e0ba3 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2910,3 +2910,5 @@
 }
 
 #endif /* CONFIG_SERIAL_8250_CONSOLE */
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 867e9f3..dcc50c87 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -61,7 +61,7 @@
 	{ .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
 	{ .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data},
 	{ .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data},
-	{ .compatible = "fsl,imx6sx-usb", .data = &imx6sl_usb_data},
+	{ .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data},
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index 9eae1a1..4456d2c 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -12,6 +12,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/usb/chipidea.h>
@@ -30,18 +31,36 @@
 	.flags		= CI_HDRC_DISABLE_STREAMING,
 };
 
+static struct ci_hdrc_platform_data ci_zynq_pdata = {
+	.capoffset	= DEF_CAPOFFSET,
+};
+
+static const struct of_device_id ci_hdrc_usb2_of_match[] = {
+	{ .compatible = "chipidea,usb2"},
+	{ .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
+
 static int ci_hdrc_usb2_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct ci_hdrc_usb2_priv *priv;
 	struct ci_hdrc_platform_data *ci_pdata = dev_get_platdata(dev);
 	int ret;
+	const struct of_device_id *match;
 
 	if (!ci_pdata) {
 		ci_pdata = devm_kmalloc(dev, sizeof(*ci_pdata), GFP_KERNEL);
 		*ci_pdata = ci_default_pdata;	/* struct copy */
 	}
 
+	match = of_match_device(ci_hdrc_usb2_of_match, &pdev->dev);
+	if (match && match->data) {
+		/* struct copy */
+		*ci_pdata = *(struct ci_hdrc_platform_data *)match->data;
+	}
+
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
@@ -96,12 +115,6 @@
 	return 0;
 }
 
-static const struct of_device_id ci_hdrc_usb2_of_match[] = {
-	{ .compatible = "chipidea,usb2" },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
-
 static struct platform_driver ci_hdrc_usb2_driver = {
 	.probe	= ci_hdrc_usb2_probe,
 	.remove	= ci_hdrc_usb2_remove,
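ci_hdrc_usb2 now keys per-SoC platform data off the matched compatible string: the of_device_id table carries a .data pointer and probe copies it over the built-in defaults. A minimal sketch of that pattern under assumed names (all example_* and vendor,* identifiers are hypothetical):

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct example_pdata {
		unsigned int capoffset;
	};

	static const struct example_pdata example_zynq_pdata = {
		.capoffset = 0x100,
	};

	static const struct of_device_id example_of_match[] = {
		{ .compatible = "vendor,generic-usb2" },	/* keep defaults */
		{ .compatible = "vendor,zynq-usb2", .data = &example_zynq_pdata },
		{ /* sentinel */ }
	};

	static int example_probe(struct platform_device *pdev)
	{
		const struct of_device_id *match;
		struct example_pdata pdata = { };	/* start from defaults */

		match = of_match_device(example_of_match, &pdev->dev);
		if (match && match->data)
			pdata = *(const struct example_pdata *)match->data; /* struct copy */

		/* ... hand pdata to the core ... */
		return 0;
	}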
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index a637da2..8223fe7 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -656,6 +656,44 @@
 	return 0;
 }
 
+static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
+{
+	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
+	int direction, retval = 0;
+	unsigned long flags;
+
+	if (ep == NULL || hwep->ep.desc == NULL)
+		return -EINVAL;
+
+	if (usb_endpoint_xfer_isoc(hwep->ep.desc))
+		return -EOPNOTSUPP;
+
+	spin_lock_irqsave(hwep->lock, flags);
+
+	if (value && hwep->dir == TX && check_transfer &&
+		!list_empty(&hwep->qh.queue) &&
+			!usb_endpoint_xfer_control(hwep->ep.desc)) {
+		spin_unlock_irqrestore(hwep->lock, flags);
+		return -EAGAIN;
+	}
+
+	direction = hwep->dir;
+	do {
+		retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
+
+		if (!value)
+			hwep->wedge = 0;
+
+		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
+			hwep->dir = (hwep->dir == TX) ? RX : TX;
+
+	} while (hwep->dir != direction);
+
+	spin_unlock_irqrestore(hwep->lock, flags);
+	return retval;
+}
+
+
 /**
  * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
  * @gadget: gadget
@@ -1051,7 +1089,7 @@
 				num += ci->hw_ep_max / 2;
 
 			spin_unlock(&ci->lock);
-			err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
+			err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
 			spin_lock(&ci->lock);
 			if (!err)
 				isr_setup_status_phase(ci);
@@ -1117,8 +1155,8 @@
 
 	if (err < 0) {
 		spin_unlock(&ci->lock);
-		if (usb_ep_set_halt(&hwep->ep))
-			dev_err(ci->dev, "error: ep_set_halt\n");
+		if (_ep_set_halt(&hwep->ep, 1, false))
+			dev_err(ci->dev, "error: _ep_set_halt\n");
 		spin_lock(&ci->lock);
 	}
 }
@@ -1149,9 +1187,9 @@
 					err = isr_setup_status_phase(ci);
 				if (err < 0) {
 					spin_unlock(&ci->lock);
-					if (usb_ep_set_halt(&hwep->ep))
+					if (_ep_set_halt(&hwep->ep, 1, false))
 						dev_err(ci->dev,
-							"error: ep_set_halt\n");
+						"error: _ep_set_halt\n");
 					spin_lock(&ci->lock);
 				}
 			}
@@ -1397,41 +1435,7 @@
  */
 static int ep_set_halt(struct usb_ep *ep, int value)
 {
-	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
-	int direction, retval = 0;
-	unsigned long flags;
-
-	if (ep == NULL || hwep->ep.desc == NULL)
-		return -EINVAL;
-
-	if (usb_endpoint_xfer_isoc(hwep->ep.desc))
-		return -EOPNOTSUPP;
-
-	spin_lock_irqsave(hwep->lock, flags);
-
-#ifndef STALL_IN
-	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
-	if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
-	    !list_empty(&hwep->qh.queue)) {
-		spin_unlock_irqrestore(hwep->lock, flags);
-		return -EAGAIN;
-	}
-#endif
-
-	direction = hwep->dir;
-	do {
-		retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
-
-		if (!value)
-			hwep->wedge = 0;
-
-		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
-			hwep->dir = (hwep->dir == TX) ? RX : TX;
-
-	} while (hwep->dir != direction);
-
-	spin_unlock_irqrestore(hwep->lock, flags);
-	return retval;
+	return _ep_set_halt(ep, value, true);
 }
 
 /**
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index b2a540b..b9ddf0c 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -112,7 +112,7 @@
 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
 		ep->ss_ep_comp.bmAttributes = 16;
 	} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
-			desc->bmAttributes > 2) {
+		   USB_SS_MULT(desc->bmAttributes) > 3) {
 		dev_warn(ddev, "Isoc endpoint has Mult of %d in "
 				"config %d interface %d altsetting %d ep %d: "
 				"setting to 3\n", desc->bmAttributes + 1,
@@ -121,7 +121,8 @@
 	}
 
 	if (usb_endpoint_xfer_isoc(&ep->desc))
-		max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
+		max_tx = (desc->bMaxBurst + 1) *
+			(USB_SS_MULT(desc->bmAttributes)) *
 			usb_endpoint_maxp(&ep->desc);
 	else if (usb_endpoint_xfer_int(&ep->desc))
 		max_tx = usb_endpoint_maxp(&ep->desc) *
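USB_SS_MULT() decodes the Mult field in bits 1:0 of a SuperSpeed endpoint companion descriptor's bmAttributes (stored as mult minus one), so the isochronous bandwidth bound above now uses the masked, decoded value and cannot be inflated by stray upper bits. A worked example, assuming a descriptor with bMaxBurst = 2, a Mult field of 1 (two packets per burst group) and a 1024-byte max packet size:

	max_tx = (bMaxBurst + 1) * USB_SS_MULT(bmAttributes) * maxp
	       = (2 + 1) * (1 + 1) * 1024
	       = 6144 bytes per service interval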
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index a5a1b7c..22e9606 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -514,8 +514,6 @@
 		goto err1;
 	}
 
-	dwc3_omap_enable_irqs(omap);
-
 	ret = dwc3_omap_extcon_register(omap);
 	if (ret < 0)
 		goto err2;
@@ -526,6 +524,8 @@
 		goto err3;
 	}
 
+	dwc3_omap_enable_irqs(omap);
+
 	return 0;
 
 err3:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0c25704..1e8bdf8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2665,8 +2665,6 @@
 	int				i;
 	irqreturn_t			ret = IRQ_NONE;
 
-	spin_lock(&dwc->lock);
-
 	for (i = 0; i < dwc->num_event_buffers; i++) {
 		irqreturn_t status;
 
@@ -2675,8 +2673,6 @@
 			ret = status;
 	}
 
-	spin_unlock(&dwc->lock);
-
 	return ret;
 }
 
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 978435a..6399c10 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -186,6 +186,7 @@
 
 	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
 		ep->claimed = false;
+		ep->driver_data = NULL;
 	}
 	gadget->in_epnum = 0;
 	gadget->out_epnum = 0;
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c
index fdacddb..175ca93 100644
--- a/drivers/usb/gadget/udc/amd5536udc.c
+++ b/drivers/usb/gadget/udc/amd5536udc.c
@@ -3138,8 +3138,8 @@
 	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
 	if (dev->irq_registered)
 		free_irq(pdev->irq, dev);
-	if (dev->regs)
-		iounmap(dev->regs);
+	if (dev->virt_addr)
+		iounmap(dev->virt_addr);
 	if (dev->mem_region)
 		release_mem_region(pci_resource_start(pdev, 0),
 				pci_resource_len(pdev, 0));
@@ -3226,17 +3226,13 @@
 
 	/* init */
 	dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
-	if (!dev) {
-		retval = -ENOMEM;
-		goto finished;
-	}
+	if (!dev)
+		return -ENOMEM;
 
 	/* pci setup */
 	if (pci_enable_device(pdev) < 0) {
-		kfree(dev);
-		dev = NULL;
 		retval = -ENODEV;
-		goto finished;
+		goto err_pcidev;
 	}
 	dev->active = 1;
 
@@ -3246,28 +3242,22 @@
 
 	if (!request_mem_region(resource, len, name)) {
 		dev_dbg(&pdev->dev, "pci device used already\n");
-		kfree(dev);
-		dev = NULL;
 		retval = -EBUSY;
-		goto finished;
+		goto err_memreg;
 	}
 	dev->mem_region = 1;
 
 	dev->virt_addr = ioremap_nocache(resource, len);
 	if (dev->virt_addr == NULL) {
 		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
-		kfree(dev);
-		dev = NULL;
 		retval = -EFAULT;
-		goto finished;
+		goto err_ioremap;
 	}
 
 	if (!pdev->irq) {
 		dev_err(&pdev->dev, "irq not set\n");
-		kfree(dev);
-		dev = NULL;
 		retval = -ENODEV;
-		goto finished;
+		goto err_irq;
 	}
 
 	spin_lock_init(&dev->lock);
@@ -3283,10 +3273,8 @@
 
 	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
 		dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
-		kfree(dev);
-		dev = NULL;
 		retval = -EBUSY;
-		goto finished;
+		goto err_irq;
 	}
 	dev->irq_registered = 1;
 
@@ -3314,8 +3302,17 @@
 		return 0;
 
 finished:
-	if (dev)
-		udc_pci_remove(pdev);
+	udc_pci_remove(pdev);
+	return retval;
+
+err_irq:
+	iounmap(dev->virt_addr);
+err_ioremap:
+	release_mem_region(resource, len);
+err_memreg:
+	pci_disable_device(pdev);
+err_pcidev:
+	kfree(dev);
 	return retval;
 }
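The amd5536udc probe path above now unwinds with labelled gotos in reverse order of acquisition instead of freeing the device and calling the full remove path on a half-initialized structure. A minimal sketch of the idiom, with hypothetical example_* names and a generic PCI resource pair standing in for the driver's real setup steps:

	#include <linux/pci.h>
	#include <linux/slab.h>

	struct example_dev {
		void __iomem *regs;
	};

	static int example_probe(struct pci_dev *pdev)
	{
		struct example_dev *dev;
		int ret;

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;

		ret = pci_enable_device(pdev);
		if (ret)
			goto err_free;

		dev->regs = pci_iomap(pdev, 0, 0);
		if (!dev->regs) {
			ret = -ENOMEM;
			goto err_disable;
		}

		return 0;			/* fully set up */

	err_disable:
		pci_disable_device(pdev);	/* undo only what already succeeded */
	err_free:
		kfree(dev);
		return ret;
	}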
 
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 3dfada8..f0f2b06 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2002,6 +2002,17 @@
 		ep->udc = udc;
 		INIT_LIST_HEAD(&ep->queue);
 
+		if (ep->index == 0) {
+			ep->ep.caps.type_control = true;
+		} else {
+			ep->ep.caps.type_iso = ep->can_isoc;
+			ep->ep.caps.type_bulk = true;
+			ep->ep.caps.type_int = true;
+		}
+
+		ep->ep.caps.dir_in = true;
+		ep->ep.caps.dir_out = true;
+
 		if (i)
 			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
 
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 5c8f4ef..ccb9c21 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -324,8 +324,7 @@
 				bdc->scratchpad.buff, bdc->scratchpad.sp_dma);
 
 	/* Destroy the dma pools */
-	if (bdc->bd_table_pool)
-		dma_pool_destroy(bdc->bd_table_pool);
+	dma_pool_destroy(bdc->bd_table_pool);
 
 	/* Free the bdc_ep array */
 	kfree(bdc->bdc_ep_array);
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 1379ad4..27af0f0 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1348,6 +1348,7 @@
 {
 	struct dummy		*dum = dum_hcd->dum;
 	struct dummy_request	*req;
+	int			sent = 0;
 
 top:
 	/* if there's no request queued, the device is NAKing; return */
@@ -1385,12 +1386,15 @@
 			if (len == 0)
 				break;
 
-			/* use an extra pass for the final short packet */
-			if (len > ep->ep.maxpacket) {
-				rescan = 1;
-				len -= (len % ep->ep.maxpacket);
+			/* send multiple of maxpacket first, then remainder */
+			if (len >= ep->ep.maxpacket) {
+				is_short = 0;
+				if (len % ep->ep.maxpacket)
+					rescan = 1;
+				len -= len % ep->ep.maxpacket;
+			} else {
+				is_short = 1;
 			}
-			is_short = (len % ep->ep.maxpacket) != 0;
 
 			len = dummy_perform_transfer(urb, req, len);
 
@@ -1399,6 +1403,7 @@
 				req->req.status = len;
 			} else {
 				limit -= len;
+				sent += len;
 				urb->actual_length += len;
 				req->req.actual += len;
 			}
@@ -1421,7 +1426,7 @@
 					*status = -EOVERFLOW;
 				else
 					*status = 0;
-			} else if (!to_host) {
+			} else {
 				*status = 0;
 				if (host_len > dev_len)
 					req->req.status = -EOVERFLOW;
@@ -1429,15 +1434,24 @@
 					req->req.status = 0;
 			}
 
-		/* many requests terminate without a short packet */
+		/*
+		 * many requests terminate without a short packet.
+		 * send a zlp if demanded by flags.
+		 */
 		} else {
-			if (req->req.length == req->req.actual
-					&& !req->req.zero)
-				req->req.status = 0;
-			if (urb->transfer_buffer_length == urb->actual_length
-					&& !(urb->transfer_flags
-						& URB_ZERO_PACKET))
-				*status = 0;
+			if (req->req.length == req->req.actual) {
+				if (req->req.zero && to_host)
+					rescan = 1;
+				else
+					req->req.status = 0;
+			}
+			if (urb->transfer_buffer_length == urb->actual_length) {
+				if (urb->transfer_flags & URB_ZERO_PACKET &&
+				    !to_host)
+					rescan = 1;
+				else
+					*status = 0;
+			}
 		}
 
 		/* device side completion --> continuable */
@@ -1460,7 +1474,7 @@
 		if (rescan)
 			goto top;
 	}
-	return limit;
+	return sent;
 }
 
 static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep)
@@ -1890,7 +1904,7 @@
 		default:
 treat_control_like_bulk:
 			ep->last_io = jiffies;
-			total = transfer(dum_hcd, urb, ep, limit, &status);
+			total -= transfer(dum_hcd, urb, ep, limit, &status);
 			break;
 		}
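The reworked dummy_hcd transfer loop only completes a request whose length is an exact multiple of maxpacket once a trailing zero-length packet has been sent, and only when the sending side asked for one (req->zero for gadget-to-host, URB_ZERO_PACKET for host-to-gadget). A sketch of that termination rule only; need_zlp() is a hypothetical helper, not a function in dummy_hcd:

	#include <linux/types.h>

	static bool need_zlp(unsigned int length, unsigned int actual,
			     unsigned int maxpacket, bool zero_requested)
	{
		/*
		 * All bytes were transferred, the last packet was full-sized,
		 * and the submitter asked for an explicit zero-length packet
		 * to terminate the transfer.
		 */
		return zero_requested && actual == length &&
		       (length % maxpacket) == 0;
	}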
 
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 8aa2593..b9429bc 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2117,8 +2117,7 @@
 		return -EBUSY;
 
 	gr_dfs_delete(dev);
-	if (dev->desc_pool)
-		dma_pool_destroy(dev->desc_pool);
+	dma_pool_destroy(dev->desc_pool);
 	platform_set_drvdata(pdev, NULL);
 
 	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 4c48969..dafe74e 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1767,8 +1767,7 @@
 	usb_del_gadget_udc(&u3d->gadget);
 
 	/* free memory allocated in probe */
-	if (u3d->trb_pool)
-		dma_pool_destroy(u3d->trb_pool);
+	dma_pool_destroy(u3d->trb_pool);
 
 	if (u3d->ep_context)
 		dma_free_coherent(&dev->dev, u3d->ep_context_size,
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 339af51..81b6229 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2100,8 +2100,7 @@
 	}
 
 	/* free memory allocated in probe */
-	if (udc->dtd_pool)
-		dma_pool_destroy(udc->dtd_pool);
+	dma_pool_destroy(udc->dtd_pool);
 
 	if (udc->ep_dqh)
 		dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
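The bdc, gr_udc, mv_u3d and mv_udc hunks above all drop the "if (pool)" guard because dma_pool_destroy(), like kfree(), is a no-op when handed NULL, so teardown can call it unconditionally even if probe failed before the pool was created. A minimal sketch of the resulting idiom; struct example_udc and its td_pool field are hypothetical:

	#include <linux/dmapool.h>

	struct example_udc {
		struct dma_pool *td_pool;
	};

	static void example_teardown(struct example_udc *udc)
	{
		/* Safe even if the pool was never allocated. */
		dma_pool_destroy(udc->td_pool);
		udc->td_pool = NULL;
	}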
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 9a8c936..41f841f 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1498,10 +1498,10 @@
 	 * use Event Data TRBs, and we don't chain in a link TRB on short
 	 * transfers, we're basically dividing by 1.
 	 *
-	 * xHCI 1.0 specification indicates that the Average TRB Length should
-	 * be set to 8 for control endpoints.
+	 * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
+	 * should be set to 8 for control endpoints.
 	 */
-	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
+	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
 		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
 	else
 		ep_ctx->tx_info |=
@@ -1792,8 +1792,7 @@
 	int size;
 	int i, j, num_ports;
 
-	if (timer_pending(&xhci->cmd_timer))
-		del_timer_sync(&xhci->cmd_timer);
+	del_timer_sync(&xhci->cmd_timer);
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -2321,6 +2320,10 @@
 
 	INIT_LIST_HEAD(&xhci->cmd_list);
 
+	/* init command timeout timer */
+	setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
+		    (unsigned long)xhci);
+
 	page_size = readl(&xhci->op_regs->page_size);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"Supported page size register = 0x%x", page_size);
@@ -2505,10 +2508,6 @@
 			"Wrote ERST address to ir_set 0.");
 	xhci_print_ir_set(xhci, 0);
 
-	/* init command timeout timer */
-	setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
-		    (unsigned long)xhci);
-
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
 	 * something other than the default (~1ms minimum between interrupts).
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5590eac..c79d336 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -180,51 +180,6 @@
 				"QUIRK: Resetting on resume");
 }
 
-/*
- * In some Intel xHCI controllers, in order to get D3 working,
- * through a vendor specific SSIC CONFIG register at offset 0x883c,
- * SSIC PORT need to be marked as "unused" before putting xHCI
- * into D3. After D3 exit, the SSIC port need to be marked as "used".
- * Without this change, xHCI might not enter D3 state.
- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
- */
-static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
-{
-	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
-	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
-	u32 val;
-	void __iomem *reg;
-
-	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
-
-		reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
-
-		/* Notify SSIC that SSIC profile programming is not done */
-		val = readl(reg) & ~PROG_DONE;
-		writel(val, reg);
-
-		/* Mark SSIC port as unused(suspend) or used(resume) */
-		val = readl(reg);
-		if (suspend)
-			val |= SSIC_PORT_UNUSED;
-		else
-			val &= ~SSIC_PORT_UNUSED;
-		writel(val, reg);
-
-		/* Notify SSIC that SSIC profile programming is done */
-		val = readl(reg) | PROG_DONE;
-		writel(val, reg);
-		readl(reg);
-	}
-
-	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
-	val = readl(reg);
-	writel(val | BIT(28), reg);
-	readl(reg);
-}
-
 #ifdef CONFIG_ACPI
 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
 {
@@ -345,6 +300,51 @@
 }
 
 #ifdef CONFIG_PM
+/*
+ * In some Intel xHCI controllers, in order to get D3 working,
+ * through a vendor specific SSIC CONFIG register at offset 0x883c,
+ * SSIC PORT need to be marked as "unused" before putting xHCI
+ * into D3. After D3 exit, the SSIC port need to be marked as "used".
+ * Without this change, xHCI might not enter D3 state.
+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+ */
+static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
+	u32 val;
+	void __iomem *reg;
+
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+
+		reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
+
+		/* Notify SSIC that SSIC profile programming is not done */
+		val = readl(reg) & ~PROG_DONE;
+		writel(val, reg);
+
+		/* Mark SSIC port as unused(suspend) or used(resume) */
+		val = readl(reg);
+		if (suspend)
+			val |= SSIC_PORT_UNUSED;
+		else
+			val &= ~SSIC_PORT_UNUSED;
+		writel(val, reg);
+
+		/* Notify SSIC that SSIC profile programming is done */
+		val = readl(reg) | PROG_DONE;
+		writel(val, reg);
+		readl(reg);
+	}
+
+	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
+	val = readl(reg);
+	writel(val | BIT(28), reg);
+	readl(reg);
+}
+
 static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 {
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a47a1e8..43291f9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -302,6 +302,15 @@
 	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
 			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
 	if (ret < 0) {
+		/* we are about to kill xhci, give it one more chance */
+		xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+			      &xhci->op_regs->cmd_ring);
+		udelay(1000);
+		ret = xhci_handshake(&xhci->op_regs->cmd_ring,
+				     CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
+		if (ret == 0)
+			return 0;
+
 		xhci_err(xhci, "Stopped the command ring failed, "
 				"maybe the host is dead\n");
 		xhci->xhc_state |= XHCI_STATE_DYING;
@@ -3461,8 +3470,8 @@
 	if (start_cycle == 0)
 		field |= 0x1;
 
-	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
-	if (xhci->hci_version == 0x100) {
+	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
+	if (xhci->hci_version >= 0x100) {
 		if (urb->transfer_buffer_length > 0) {
 			if (setup->bRequestType & USB_DIR_IN)
 				field |= TRB_TX_TYPE(TRB_DATA_IN);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6b0f4a4..9957bd9 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -146,7 +146,8 @@
 				"waited %u microseconds.\n",
 				XHCI_MAX_HALT_USEC);
 	if (!ret)
-		xhci->xhc_state &= ~XHCI_STATE_HALTED;
+		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+
 	return ret;
 }
 
@@ -654,15 +655,6 @@
 }
 EXPORT_SYMBOL_GPL(xhci_run);
 
-static void xhci_only_stop_hcd(struct usb_hcd *hcd)
-{
-	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
-	spin_lock_irq(&xhci->lock);
-	xhci_halt(xhci);
-	spin_unlock_irq(&xhci->lock);
-}
-
 /*
  * Stop xHCI driver.
  *
@@ -677,12 +669,14 @@
 	u32 temp;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-	if (!usb_hcd_is_primary_hcd(hcd)) {
-		xhci_only_stop_hcd(xhci->shared_hcd);
+	if (xhci->xhc_state & XHCI_STATE_HALTED)
 		return;
-	}
 
+	mutex_lock(&xhci->mutex);
 	spin_lock_irq(&xhci->lock);
+	xhci->xhc_state |= XHCI_STATE_HALTED;
+	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+
 	/* Make sure the xHC is halted for a USB3 roothub
 	 * (xhci_stop() could be called as part of failed init).
 	 */
@@ -717,6 +711,7 @@
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"xhci_stop completed - status = %x",
 			readl(&xhci->op_regs->status));
+	mutex_unlock(&xhci->mutex);
 }
 
 /*
@@ -3793,6 +3788,9 @@
 
 	mutex_lock(&xhci->mutex);
 
+	if (xhci->xhc_state)	/* dying or halted */
+		goto out;
+
 	if (!udev->slot_id) {
 		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
 				"Bad Slot ID %d", udev->slot_id);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 514a6cd..4a518ff 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1051,6 +1051,7 @@
 	 * (c) peripheral initiates, using SRP
 	 */
 	if (musb->port_mode != MUSB_PORT_MODE_HOST &&
+			musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
 			(devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
 		musb->is_active = 1;
 	} else {
@@ -2448,6 +2449,9 @@
 	struct musb	*musb = dev_to_musb(dev);
 	unsigned long	flags;
 
+	musb_platform_disable(musb);
+	musb_generic_disable(musb);
+
 	spin_lock_irqsave(&musb->lock, flags);
 
 	if (is_peripheral_active(musb)) {
@@ -2501,6 +2505,9 @@
 	pm_runtime_disable(dev);
 	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);
+
+	musb_start(musb);
+
 	return 0;
 }
 
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index d07cafb..e499b86 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -551,6 +551,9 @@
 	} else {
 		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
 
+		/* delay to drain the cppi dma pipeline for isoch */
+		udelay(250);
+
 		csr = musb_readw(epio, MUSB_RXCSR);
 		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
 		musb_writew(epio, MUSB_RXCSR, csr);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index a0cfead..84512d1 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -225,8 +225,11 @@
 
 	dsps_writel(reg_base, wrp->epintr_set, epmask);
 	dsps_writel(reg_base, wrp->coreintr_set, coremask);
-	/* start polling for ID change. */
-	mod_timer(&glue->timer, jiffies + msecs_to_jiffies(wrp->poll_timeout));
+	/* start polling for ID change in dual-role idle mode */
+	if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
+			musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
+		mod_timer(&glue->timer, jiffies +
+				msecs_to_jiffies(wrp->poll_timeout));
 	dsps_musb_try_idle(musb, 0);
 }
 
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index 39168fe..b2685e7 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -379,6 +379,8 @@
         {}
 };
 
+MODULE_DEVICE_TABLE(of, ux500_match);
+
 static struct platform_driver ux500_driver = {
 	.probe		= ux500_probe,
 	.remove		= ux500_remove,
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 7d3beee..1731324 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -155,7 +155,7 @@
 config USB_QCOM_8X16_PHY
 	tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support"
 	depends on ARCH_QCOM || COMPILE_TEST
-	depends on RESET_CONTROLLER
+	depends on RESET_CONTROLLER && EXTCON
 	select USB_PHY
 	select USB_ULPI_VIEWPORT
 	help
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index ec6ecd0..5320cb8 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -232,7 +232,8 @@
 		clk_rate = pdata->clk_rate;
 		needs_vcc = pdata->needs_vcc;
 		if (gpio_is_valid(pdata->gpio_reset)) {
-			err = devm_gpio_request_one(dev, pdata->gpio_reset, 0,
+			err = devm_gpio_request_one(dev, pdata->gpio_reset,
+						    GPIOF_ACTIVE_LOW,
 						    dev_name(dev));
 			if (!err)
 				nop->gpiod_reset =
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index 8a55b37..db68156 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -31,6 +31,7 @@
 	{ "isp1301", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, isp1301_id);
 
 static struct i2c_client *isp1301_i2c_client;
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6d1941a..6956c4f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -278,6 +278,10 @@
 #define ZTE_PRODUCT_MF622			0x0001
 #define ZTE_PRODUCT_MF628			0x0015
 #define ZTE_PRODUCT_MF626			0x0031
+#define ZTE_PRODUCT_ZM8620_X			0x0396
+#define ZTE_PRODUCT_ME3620_MBIM			0x0426
+#define ZTE_PRODUCT_ME3620_X			0x1432
+#define ZTE_PRODUCT_ME3620_L			0x1433
 #define ZTE_PRODUCT_AC2726			0xfff1
 #define ZTE_PRODUCT_MG880			0xfffd
 #define ZTE_PRODUCT_CDMA_TECH			0xfffe
@@ -544,6 +548,18 @@
 	.sendsetup = BIT(1) | BIT(2) | BIT(3),
 };
 
+static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
+	.reserved = BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_me3620_xl_blacklist = {
+	.reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
+static const struct option_blacklist_info zte_zm8620_x_blacklist = {
+	.reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
 static const struct option_blacklist_info huawei_cdc12_blacklist = {
 	.reserved = BIT(1) | BIT(2),
 };
@@ -1591,6 +1607,14 @@
 	 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
 	 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
+	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
+	 .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
+	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
+	 .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 6c3734d..d3ea90b 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -80,6 +80,8 @@
 static int  whiteheat_firmware_attach(struct usb_serial *serial);
 
 /* function prototypes for the Connect Tech WhiteHEAT serial converter */
+static int whiteheat_probe(struct usb_serial *serial,
+				const struct usb_device_id *id);
 static int  whiteheat_attach(struct usb_serial *serial);
 static void whiteheat_release(struct usb_serial *serial);
 static int  whiteheat_port_probe(struct usb_serial_port *port);
@@ -116,6 +118,7 @@
 	.description =		"Connect Tech - WhiteHEAT",
 	.id_table =		id_table_std,
 	.num_ports =		4,
+	.probe =		whiteheat_probe,
 	.attach =		whiteheat_attach,
 	.release =		whiteheat_release,
 	.port_probe =		whiteheat_port_probe,
@@ -217,6 +220,34 @@
 /*****************************************************************************
  * Connect Tech's White Heat serial driver functions
  *****************************************************************************/
+
+static int whiteheat_probe(struct usb_serial *serial,
+				const struct usb_device_id *id)
+{
+	struct usb_host_interface *iface_desc;
+	struct usb_endpoint_descriptor *endpoint;
+	size_t num_bulk_in = 0;
+	size_t num_bulk_out = 0;
+	size_t min_num_bulk;
+	unsigned int i;
+
+	iface_desc = serial->interface->cur_altsetting;
+
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+		endpoint = &iface_desc->endpoint[i].desc;
+		if (usb_endpoint_is_bulk_in(endpoint))
+			++num_bulk_in;
+		if (usb_endpoint_is_bulk_out(endpoint))
+			++num_bulk_out;
+	}
+
+	min_num_bulk = COMMAND_PORT + 1;
+	if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
+		return -ENODEV;
+
+	return 0;
+}
+
 static int whiteheat_attach(struct usb_serial *serial)
 {
 	struct usb_serial_port *command_port;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7d137a4..9eda69e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -61,8 +61,7 @@
 enum {
 	VHOST_NET_FEATURES = VHOST_FEATURES |
 			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
-			 (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
-			 (1ULL << VIRTIO_F_VERSION_1),
+			 (1ULL << VIRTIO_NET_F_MRG_RXBUF)
 };
 
 enum {
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index f114a9d..e25a236 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -166,9 +166,7 @@
 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
 enum {
 	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
-					       (1ULL << VIRTIO_SCSI_F_T10_PI) |
-					       (1ULL << VIRTIO_F_ANY_LAYOUT) |
-					       (1ULL << VIRTIO_F_VERSION_1)
+					       (1ULL << VIRTIO_SCSI_F_T10_PI)
 };
 
 #define VHOST_SCSI_MAX_TARGET	256
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index d9c501e..f2882ac 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -277,10 +277,13 @@
 			return -EFAULT;
 		return 0;
 	case VHOST_SET_FEATURES:
+		printk(KERN_ERR "1\n");
 		if (copy_from_user(&features, featurep, sizeof features))
 			return -EFAULT;
+		printk(KERN_ERR "2\n");
 		if (features & ~VHOST_FEATURES)
 			return -EOPNOTSUPP;
+		printk(KERN_ERR "3\n");
 		return vhost_test_set_features(n, features);
 	case VHOST_RESET_OWNER:
 		return vhost_test_reset_owner(n);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index ce6f6da..4772862 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -173,7 +173,9 @@
 	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
 			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
 			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
-			 (1ULL << VHOST_F_LOG_ALL),
+			 (1ULL << VHOST_F_LOG_ALL) |
+			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
+			 (1ULL << VIRTIO_F_VERSION_1)
 };
 
 static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 55c4b5b..79e1aa1 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -188,6 +188,15 @@
 	  Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will
 	  reboot your system when the timeout is reached.
 
+config SAMA5D4_WATCHDOG
+	tristate "Atmel SAMA5D4 Watchdog Timer"
+	depends on ARCH_AT91
+	select WATCHDOG_CORE
+	help
+	  The Atmel SAMA5D4 watchdog timer is embedded in SAMA5D4 chips.
+	  Its Watchdog Timer Mode Register can be written more than once.
+	  This will reboot your system when the timeout is reached.
+
 config CADENCE_WATCHDOG
 	tristate "Cadence Watchdog Timer"
 	depends on HAS_IOMEM
@@ -558,6 +567,17 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called digicolor_wdt.
 
+config LPC18XX_WATCHDOG
+	tristate "LPC18xx/43xx Watchdog"
+	depends on ARCH_LPC18XX || COMPILE_TEST
+	select WATCHDOG_CORE
+	help
+	  Say Y here to include support for the watchdog timer
+	  in the NXP LPC SoC family, which includes LPC18xx/LPC43xx
+	  processors.
+	  To compile this driver as a module, choose M here: the
+	  module will be called lpc18xx_wdt.
+
 # AVR32 Architecture
 
 config AT32AP700X_WDT
@@ -797,8 +817,9 @@
 	tristate "Intel TCO Timer/Watchdog"
 	depends on (X86 || IA64) && PCI
 	select WATCHDOG_CORE
+	depends on I2C || I2C=n
 	select LPC_ICH if !EXPERT
-	select I2C_I801 if !EXPERT
+	select I2C_I801 if !EXPERT && I2C
 	---help---
 	  Hardware driver for the intel TCO timer based watchdog devices.
 	  These drivers are included in the Intel 82801 I/O Controller
@@ -1334,7 +1355,7 @@
 
 config 8xxx_WDT
 	tristate "MPC8xxx Platform Watchdog Timer"
-	depends on PPC_8xx || PPC_83xx || PPC_86xx
+	depends on PPC_8xx || PPC_83xx || PPC_86xx || PPC_MPC512x
 	select WATCHDOG_CORE
 	help
 	  This driver is for a SoC level watchdog that exists on some
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 59ea9a1..0c616e3 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -41,6 +41,7 @@
 obj-$(CONFIG_KS8695_WATCHDOG) += ks8695_wdt.o
 obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o
 obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o
+obj-$(CONFIG_SAMA5D4_WATCHDOG) += sama5d4_wdt.o
 obj-$(CONFIG_DW_WATCHDOG) += dw_wdt.o
 obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
 obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
@@ -66,6 +67,7 @@
 obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o
 obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o
 obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o
+obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
 
 # AVR32 Architecture
 obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 9ba1153..e12a797 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -244,7 +244,7 @@
 	}
 
 	regmap_st = syscon_node_to_regmap(parent->of_node);
-	if (!regmap_st)
+	if (IS_ERR(regmap_st))
 		return -ENODEV;
 
 	res = misc_register(&at91wdt_miscdev);
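syscon_node_to_regmap() returns an ERR_PTR() on failure, never NULL, so the probe check above is fixed to use IS_ERR(); the old NULL test could never catch the error. A minimal sketch of the general ERR_PTR handling (the driver itself simply maps the failure to -ENODEV, which is also fine; example_get_regmap() is a hypothetical helper):

	#include <linux/err.h>
	#include <linux/mfd/syscon.h>
	#include <linux/of.h>
	#include <linux/regmap.h>

	static int example_get_regmap(struct device_node *parent,
				      struct regmap **map)
	{
		struct regmap *r = syscon_node_to_regmap(parent);

		if (IS_ERR(r))			/* this API never returns NULL */
			return PTR_ERR(r);	/* propagate the real error code */

		*map = r;
		return 0;
	}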
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index e4698f7..7e6acaf 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -17,6 +17,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/clk.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -90,6 +91,7 @@
 	unsigned long heartbeat;	/* WDT heartbeat in jiffies */
 	bool nowayout;
 	unsigned int irq;
+	struct clk *sclk;
 };
 
 /* ......................................................................... */
@@ -352,15 +354,25 @@
 	if (IS_ERR(wdt->base))
 		return PTR_ERR(wdt->base);
 
+	wdt->sclk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(wdt->sclk))
+		return PTR_ERR(wdt->sclk);
+
+	err = clk_prepare_enable(wdt->sclk);
+	if (err) {
+		dev_err(&pdev->dev, "Could not enable slow clock\n");
+		return err;
+	}
+
 	if (pdev->dev.of_node) {
 		err = of_at91wdt_init(pdev->dev.of_node, wdt);
 		if (err)
-			return err;
+			goto err_clk;
 	}
 
 	err = at91_wdt_init(pdev, wdt);
 	if (err)
-		return err;
+		goto err_clk;
 
 	platform_set_drvdata(pdev, wdt);
 
@@ -368,6 +380,11 @@
 		wdt->wdd.timeout, wdt->nowayout);
 
 	return 0;
+
+err_clk:
+	clk_disable_unprepare(wdt->sclk);
+
+	return err;
 }
 
 static int __exit at91wdt_remove(struct platform_device *pdev)
@@ -377,6 +394,7 @@
 
 	pr_warn("I quit now, hardware will probably reboot!\n");
 	del_timer(&wdt->timer);
+	clk_disable_unprepare(wdt->sclk);
 
 	return 0;
 }
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
index c6fbb2e6..b79a83b 100644
--- a/drivers/watchdog/at91sam9_wdt.h
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -22,11 +22,13 @@
 
 #define AT91_WDT_MR		0x04			/* Watchdog Mode Register */
 #define		AT91_WDT_WDV		(0xfff << 0)		/* Counter Value */
+#define			AT91_WDT_SET_WDV(x)	((x) & AT91_WDT_WDV)
 #define		AT91_WDT_WDFIEN		(1     << 12)		/* Fault Interrupt Enable */
 #define		AT91_WDT_WDRSTEN	(1     << 13)		/* Reset Processor */
 #define		AT91_WDT_WDRPROC	(1     << 14)		/* Timer Restart */
 #define		AT91_WDT_WDDIS		(1     << 15)		/* Watchdog Disable */
 #define		AT91_WDT_WDD		(0xfff << 16)		/* Delta Value */
+#define			AT91_WDT_SET_WDD(x)	(((x) << 16) & AT91_WDT_WDD)
 #define		AT91_WDT_WDDBGHLT	(1     << 28)		/* Debug Halt */
 #define		AT91_WDT_WDIDLEHLT	(1     << 29)		/* Idle Halt */
 
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 7116968..8a5ce5b 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -36,6 +36,13 @@
 #define PM_RSTC_WRCFG_FULL_RESET	0x00000020
 #define PM_RSTC_RESET			0x00000102
 
+/*
+ * The Raspberry Pi firmware uses the RSTS register to know which partition
+ * to boot from. The partition value is spread into bits 0, 2, 4, 6, 8, 10.
+ * Partition 63 is a special partition used by the firmware to indicate halt.
+ */
+#define PM_RSTS_RASPBERRYPI_HALT	0x555
+
 #define SECS_TO_WDOG_TICKS(x) ((x) << 16)
 #define WDOG_TICKS_TO_SECS(x) ((x) >> 16)
 
@@ -151,8 +158,7 @@
 	 * hard reset.
 	 */
 	val = readl_relaxed(wdt->base + PM_RSTS);
-	val &= PM_RSTC_WRCFG_CLR;
-	val |= PM_PASSWORD | PM_RSTS_HADWRH_SET;
+	val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT;
 	writel_relaxed(val, wdt->base + PM_RSTS);
 
 	/* Continue with normal reset mechanism */
@@ -182,6 +188,7 @@
 	watchdog_set_drvdata(&bcm2835_wdt_wdd, wdt);
 	watchdog_init_timeout(&bcm2835_wdt_wdd, heartbeat, dev);
 	watchdog_set_nowayout(&bcm2835_wdt_wdd, nowayout);
+	bcm2835_wdt_wdd.parent = &pdev->dev;
 	err = watchdog_register_device(&bcm2835_wdt_wdd);
 	if (err) {
 		dev_err(dev, "Failed to register watchdog device");
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index b28a072..4064a43 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -209,6 +209,7 @@
 
 	wdt->wdd.info = &bcm47xx_wdt_info;
 	wdt->wdd.timeout = WDT_DEFAULT_TIME;
+	wdt->wdd.parent = &pdev->dev;
 	ret = wdt->wdd.ops->set_timeout(&wdt->wdd, timeout);
 	if (ret)
 		goto err_timer;
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index 22d8ae6..e0c9842 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -319,6 +319,7 @@
 	spin_lock_init(&wdt->lock);
 	platform_set_drvdata(pdev, wdt);
 	watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
+	bcm_kona_wdt_wdd.parent = &pdev->dev;
 
 	ret = bcm_kona_wdt_set_timeout_reg(&bcm_kona_wdt_wdd, 0);
 	if (ret) {
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index e96b09b..04da4b6 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -186,8 +186,6 @@
 static int booke_wdt_set_timeout(struct watchdog_device *wdt_dev,
 				 unsigned int timeout)
 {
-	if (timeout > MAX_WDT_TIMEOUT)
-		return -EINVAL;
 	wdt_dev->timeout = timeout;
 	booke_wdt_set(wdt_dev);
 
@@ -211,7 +209,6 @@
 	.info = &booke_wdt_info,
 	.ops = &booke_wdt_ops,
 	.min_timeout = 1,
-	.max_timeout = 0xFFFF
 };
 
 static void __exit booke_wdt_exit(void)
@@ -229,6 +226,7 @@
 	booke_wdt_set_timeout(&booke_wdt_dev,
 			      period_to_sec(booke_wdt_period));
 	watchdog_set_nowayout(&booke_wdt_dev, nowayout);
+	booke_wdt_dev.max_timeout = MAX_WDT_TIMEOUT;
 	if (booke_wdt_enabled)
 		booke_wdt_start(&booke_wdt_dev);
 
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index ce12f43..a099b77 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -358,6 +358,7 @@
 	if (ret < 0)
 		coh901327_wdt.timeout = 60;
 
+	coh901327_wdt.parent = &pdev->dev;
 	ret = watchdog_register_device(&coh901327_wdt);
 	if (ret == 0)
 		dev_info(&pdev->dev,
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index 2e95896..67e6797 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -195,6 +195,7 @@
 	da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
 	da9052_wdt->info = &da9052_wdt_info;
 	da9052_wdt->ops = &da9052_wdt_ops;
+	da9052_wdt->parent = &pdev->dev;
 	watchdog_set_drvdata(da9052_wdt, driver_data);
 
 	kref_init(&driver_data->kref);
diff --git a/drivers/watchdog/da9055_wdt.c b/drivers/watchdog/da9055_wdt.c
index 495089d..04d1430 100644
--- a/drivers/watchdog/da9055_wdt.c
+++ b/drivers/watchdog/da9055_wdt.c
@@ -161,6 +161,7 @@
 	da9055_wdt->timeout = DA9055_DEF_TIMEOUT;
 	da9055_wdt->info = &da9055_wdt_info;
 	da9055_wdt->ops = &da9055_wdt_ops;
+	da9055_wdt->parent = &pdev->dev;
 	watchdog_set_nowayout(da9055_wdt, nowayout);
 	watchdog_set_drvdata(da9055_wdt, driver_data);
 
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index b3a870c..7386111 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -210,6 +210,7 @@
 	wdt->wdtdev.max_timeout = DA9062_WDT_MAX_TIMEOUT;
 	wdt->wdtdev.timeout = DA9062_WDG_DEFAULT_TIMEOUT;
 	wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
+	wdt->wdtdev.parent = &pdev->dev;
 
 	watchdog_set_drvdata(&wdt->wdtdev, wdt);
 	dev_set_drvdata(&pdev->dev, wdt);
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index e2fe2eb..6bf130b 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -175,6 +175,7 @@
 	wdt->wdtdev.min_timeout = DA9063_WDT_MIN_TIMEOUT;
 	wdt->wdtdev.max_timeout = DA9063_WDT_MAX_TIMEOUT;
 	wdt->wdtdev.timeout = DA9063_WDG_TIMEOUT;
+	wdt->wdtdev.parent = &pdev->dev;
 
 	wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
 
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index cfdf8a4..17454ca 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -179,6 +179,7 @@
 	wdd->min_timeout	= 1;
 	wdd->max_timeout	= MAX_HEARTBEAT;
 	wdd->timeout		= DEFAULT_HEARTBEAT;
+	wdd->parent		= &pdev->dev;
 
 	watchdog_init_timeout(wdd, heartbeat, dev);
 
diff --git a/drivers/watchdog/digicolor_wdt.c b/drivers/watchdog/digicolor_wdt.c
index 31d8e49..50abe1b 100644
--- a/drivers/watchdog/digicolor_wdt.c
+++ b/drivers/watchdog/digicolor_wdt.c
@@ -143,6 +143,7 @@
 	}
 	dc_wdt_wdd.max_timeout = U32_MAX / clk_get_rate(wdt->clk);
 	dc_wdt_wdd.timeout = dc_wdt_wdd.max_timeout;
+	dc_wdt_wdd.parent = &pdev->dev;
 
 	spin_lock_init(&wdt->lock);
 
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index 7a2cc71..0a4d7cc 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -132,6 +132,7 @@
 	val = readl(mmio_base + EP93XX_WATCHDOG);
 	ep93xx_wdt_wdd.bootstatus = (val & 0x01) ? WDIOF_CARDRESET : 0;
 	ep93xx_wdt_wdd.timeout = timeout;
+	ep93xx_wdt_wdd.parent = &pdev->dev;
 
 	watchdog_set_nowayout(&ep93xx_wdt_wdd, nowayout);
 
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index cc1bdfc..006e234 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -303,6 +303,7 @@
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, gef_wdt_ids);
 
 static struct platform_driver gef_wdt_driver = {
 	.driver = {
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
index 1687cc2..90d59d3 100644
--- a/drivers/watchdog/gpio_wdt.c
+++ b/drivers/watchdog/gpio_wdt.c
@@ -50,12 +50,41 @@
 		gpio_direction_input(priv->gpio);
 }
 
+static void gpio_wdt_hwping(unsigned long data)
+{
+	struct watchdog_device *wdd = (struct watchdog_device *)data;
+	struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+	if (priv->armed && time_after(jiffies, priv->last_jiffies +
+				      msecs_to_jiffies(wdd->timeout * 1000))) {
+		dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n");
+		return;
+	}
+
+	/* Restart timer */
+	mod_timer(&priv->timer, jiffies + priv->hw_margin);
+
+	switch (priv->hw_algo) {
+	case HW_ALGO_TOGGLE:
+		/* Toggle output pin */
+		priv->state = !priv->state;
+		gpio_set_value_cansleep(priv->gpio, priv->state);
+		break;
+	case HW_ALGO_LEVEL:
+		/* Pulse */
+		gpio_set_value_cansleep(priv->gpio, !priv->active_low);
+		udelay(1);
+		gpio_set_value_cansleep(priv->gpio, priv->active_low);
+		break;
+	}
+}
+
 static void gpio_wdt_start_impl(struct gpio_wdt_priv *priv)
 {
 	priv->state = priv->active_low;
 	gpio_direction_output(priv->gpio, priv->state);
 	priv->last_jiffies = jiffies;
-	mod_timer(&priv->timer, priv->last_jiffies + priv->hw_margin);
+	gpio_wdt_hwping((unsigned long)&priv->wdd);
 }
 
 static int gpio_wdt_start(struct watchdog_device *wdd)
@@ -97,35 +126,6 @@
 	return gpio_wdt_ping(wdd);
 }
 
-static void gpio_wdt_hwping(unsigned long data)
-{
-	struct watchdog_device *wdd = (struct watchdog_device *)data;
-	struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
-
-	if (priv->armed && time_after(jiffies, priv->last_jiffies +
-				      msecs_to_jiffies(wdd->timeout * 1000))) {
-		dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n");
-		return;
-	}
-
-	/* Restart timer */
-	mod_timer(&priv->timer, jiffies + priv->hw_margin);
-
-	switch (priv->hw_algo) {
-	case HW_ALGO_TOGGLE:
-		/* Toggle output pin */
-		priv->state = !priv->state;
-		gpio_set_value_cansleep(priv->gpio, priv->state);
-		break;
-	case HW_ALGO_LEVEL:
-		/* Pulse */
-		gpio_set_value_cansleep(priv->gpio, !priv->active_low);
-		udelay(1);
-		gpio_set_value_cansleep(priv->gpio, priv->active_low);
-		break;
-	}
-}
-
 static int gpio_wdt_notify_sys(struct notifier_block *nb, unsigned long code,
 			       void *unused)
 {
@@ -182,10 +182,10 @@
 	ret = of_property_read_string(pdev->dev.of_node, "hw_algo", &algo);
 	if (ret)
 		return ret;
-	if (!strncmp(algo, "toggle", 6)) {
+	if (!strcmp(algo, "toggle")) {
 		priv->hw_algo = HW_ALGO_TOGGLE;
 		f = GPIOF_IN;
-	} else if (!strncmp(algo, "level", 5)) {
+	} else if (!strcmp(algo, "level")) {
 		priv->hw_algo = HW_ALGO_LEVEL;
 		f = priv->active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
 	} else {
@@ -217,6 +217,7 @@
 	priv->wdd.ops		= &gpio_wdt_ops;
 	priv->wdd.min_timeout	= SOFT_TIMEOUT_MIN;
 	priv->wdd.max_timeout	= SOFT_TIMEOUT_MAX;
+	priv->wdd.parent	= &pdev->dev;
 
 	if (watchdog_init_timeout(&priv->wdd, 0, &pdev->dev) < 0)
 		priv->wdd.timeout = SOFT_TIMEOUT_DEF;
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c
index 9bc39ae..78c2541 100644
--- a/drivers/watchdog/ie6xx_wdt.c
+++ b/drivers/watchdog/ie6xx_wdt.c
@@ -267,6 +267,7 @@
 
 	ie6xx_wdt_dev.timeout = timeout;
 	watchdog_set_nowayout(&ie6xx_wdt_dev, nowayout);
+	ie6xx_wdt_dev.parent = &pdev->dev;
 
 	spin_lock_init(&ie6xx_wdt_data.unlock_sequence);
 
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index 0f73621..15ab072 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -316,6 +316,7 @@
 {
 	struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
 
+	unregister_restart_handler(&pdc_wdt->restart_handler);
 	pdc_wdt_stop(&pdc_wdt->wdt_dev);
 	watchdog_unregister_device(&pdc_wdt->wdt_dev);
 	clk_disable_unprepare(pdc_wdt->wdt_clk);
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 84f6701c..0a436b5 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -137,6 +137,7 @@
 	wdt_dev->min_timeout = MID_WDT_TIMEOUT_MIN;
 	wdt_dev->max_timeout = MID_WDT_TIMEOUT_MAX;
 	wdt_dev->timeout = MID_WDT_DEFAULT_TIMEOUT;
+	wdt_dev->parent = &pdev->dev;
 
 	watchdog_set_drvdata(wdt_dev, &pdev->dev);
 	platform_set_drvdata(pdev, wdt_dev);
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 4c2cc09..6a7d5c3 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -174,6 +174,7 @@
 	jz4740_wdt->timeout = heartbeat;
 	jz4740_wdt->min_timeout = 1;
 	jz4740_wdt->max_timeout = MAX_HEARTBEAT;
+	jz4740_wdt->parent = &pdev->dev;
 	watchdog_set_nowayout(jz4740_wdt, nowayout);
 	watchdog_set_drvdata(jz4740_wdt, drvdata);
 
diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c
new file mode 100644
index 0000000..ab7b8b1
--- /dev/null
+++ b/drivers/watchdog/lpc18xx_wdt.c
@@ -0,0 +1,340 @@
+/*
+ * NXP LPC18xx Watchdog Timer (WDT)
+ *
+ * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Notes
+ * -----
+ * The Watchdog consists of a fixed divide-by-4 clock pre-scaler and a 24-bit
+ * counter which decrements on every clock cycle.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+/* Registers */
+#define LPC18XX_WDT_MOD			0x00
+#define LPC18XX_WDT_MOD_WDEN		BIT(0)
+#define LPC18XX_WDT_MOD_WDRESET		BIT(1)
+
+#define LPC18XX_WDT_TC			0x04
+#define LPC18XX_WDT_TC_MIN		0xff
+#define LPC18XX_WDT_TC_MAX		0xffffff
+
+#define LPC18XX_WDT_FEED		0x08
+#define LPC18XX_WDT_FEED_MAGIC1		0xaa
+#define LPC18XX_WDT_FEED_MAGIC2		0x55
+
+#define LPC18XX_WDT_TV			0x0c
+
+/* Clock pre-scaler */
+#define LPC18XX_WDT_CLK_DIV		4
+
+/* Timeout values in seconds */
+#define LPC18XX_WDT_DEF_TIMEOUT		30U
+
+static int heartbeat;
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds (default="
+		 __MODULE_STRING(LPC18XX_WDT_DEF_TIMEOUT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+		 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct lpc18xx_wdt_dev {
+	struct watchdog_device	wdt_dev;
+	struct clk		*reg_clk;
+	struct clk		*wdt_clk;
+	unsigned long		clk_rate;
+	void __iomem		*base;
+	struct timer_list	timer;
+	struct notifier_block	restart_handler;
+	spinlock_t		lock;
+};
+
+static int lpc18xx_wdt_feed(struct watchdog_device *wdt_dev)
+{
+	struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
+	unsigned long flags;
+
+	/*
+	 * An abort condition will occur if an interrupt happens during the feed
+	 * sequence.
+	 */
+	spin_lock_irqsave(&lpc18xx_wdt->lock, flags);
+	writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
+	writel(LPC18XX_WDT_FEED_MAGIC2, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
+	spin_unlock_irqrestore(&lpc18xx_wdt->lock, flags);
+
+	return 0;
+}
+
+static void lpc18xx_wdt_timer_feed(unsigned long data)
+{
+	struct watchdog_device *wdt_dev = (struct watchdog_device *)data;
+	struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
+
+	lpc18xx_wdt_feed(wdt_dev);
+
+	/* Use safe value (1/2 of real timeout) */
+	mod_timer(&lpc18xx_wdt->timer, jiffies +
+		  msecs_to_jiffies((wdt_dev->timeout * MSEC_PER_SEC) / 2));
+}
+
+/*
+ * Since LPC18xx Watchdog cannot be disabled in hardware, we must keep feeding
+ * it with a timer until userspace watchdog software takes over.
+ */
+static int lpc18xx_wdt_stop(struct watchdog_device *wdt_dev)
+{
+	lpc18xx_wdt_timer_feed((unsigned long)wdt_dev);
+
+	return 0;
+}
+
+static void __lpc18xx_wdt_set_timeout(struct lpc18xx_wdt_dev *lpc18xx_wdt)
+{
+	unsigned int val;
+
+	val = DIV_ROUND_UP(lpc18xx_wdt->wdt_dev.timeout * lpc18xx_wdt->clk_rate,
+			   LPC18XX_WDT_CLK_DIV);
+	writel(val, lpc18xx_wdt->base + LPC18XX_WDT_TC);
+}
+
+static int lpc18xx_wdt_set_timeout(struct watchdog_device *wdt_dev,
+				   unsigned int new_timeout)
+{
+	struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
+
+	lpc18xx_wdt->wdt_dev.timeout = new_timeout;
+	__lpc18xx_wdt_set_timeout(lpc18xx_wdt);
+
+	return 0;
+}
+
+static unsigned int lpc18xx_wdt_get_timeleft(struct watchdog_device *wdt_dev)
+{
+	struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
+	unsigned int val;
+
+	val = readl(lpc18xx_wdt->base + LPC18XX_WDT_TV);
+	return (val * LPC18XX_WDT_CLK_DIV) / lpc18xx_wdt->clk_rate;
+}
+
+static int lpc18xx_wdt_start(struct watchdog_device *wdt_dev)
+{
+	struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
+	unsigned int val;
+
+	if (timer_pending(&lpc18xx_wdt->timer))
+		del_timer(&lpc18xx_wdt->timer);
+
+	val = readl(lpc18xx_wdt->base + LPC18XX_WDT_MOD);
+	val |= LPC18XX_WDT_MOD_WDEN;
+	val |= LPC18XX_WDT_MOD_WDRESET;
+	writel(val, lpc18xx_wdt->base + LPC18XX_WDT_MOD);
+
+	/*
+	 * Setting the WDEN bit in the WDMOD register is not sufficient to
+	 * enable the Watchdog. A valid feed sequence must be completed after
+	 * setting WDEN before the Watchdog is capable of generating a reset.
+	 */
+	lpc18xx_wdt_feed(wdt_dev);
+
+	return 0;
+}
+
+static struct watchdog_info lpc18xx_wdt_info = {
+	.identity	= "NXP LPC18xx Watchdog",
+	.options	= WDIOF_SETTIMEOUT |
+			  WDIOF_KEEPALIVEPING |
+			  WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops lpc18xx_wdt_ops = {
+	.owner		= THIS_MODULE,
+	.start		= lpc18xx_wdt_start,
+	.stop		= lpc18xx_wdt_stop,
+	.ping		= lpc18xx_wdt_feed,
+	.set_timeout	= lpc18xx_wdt_set_timeout,
+	.get_timeleft	= lpc18xx_wdt_get_timeleft,
+};
+
+static int lpc18xx_wdt_restart(struct notifier_block *this, unsigned long mode,
+			       void *cmd)
+{
+	struct lpc18xx_wdt_dev *lpc18xx_wdt = container_of(this,
+				struct lpc18xx_wdt_dev, restart_handler);
+	unsigned long flags;
+	int val;
+
+	/*
+	 * Incorrect feed sequence causes immediate watchdog reset if enabled.
+	 */
+	spin_lock_irqsave(&lpc18xx_wdt->lock, flags);
+
+	val = readl(lpc18xx_wdt->base + LPC18XX_WDT_MOD);
+	val |= LPC18XX_WDT_MOD_WDEN;
+	val |= LPC18XX_WDT_MOD_WDRESET;
+	writel(val, lpc18xx_wdt->base + LPC18XX_WDT_MOD);
+
+	writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
+	writel(LPC18XX_WDT_FEED_MAGIC2, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
+
+	writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
+	writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
+
+	spin_unlock_irqrestore(&lpc18xx_wdt->lock, flags);
+
+	return NOTIFY_OK;
+}
+
+static int lpc18xx_wdt_probe(struct platform_device *pdev)
+{
+	struct lpc18xx_wdt_dev *lpc18xx_wdt;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret;
+
+	lpc18xx_wdt = devm_kzalloc(dev, sizeof(*lpc18xx_wdt), GFP_KERNEL);
+	if (!lpc18xx_wdt)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	lpc18xx_wdt->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(lpc18xx_wdt->base))
+		return PTR_ERR(lpc18xx_wdt->base);
+
+	lpc18xx_wdt->reg_clk = devm_clk_get(dev, "reg");
+	if (IS_ERR(lpc18xx_wdt->reg_clk)) {
+		dev_err(dev, "failed to get the reg clock\n");
+		return PTR_ERR(lpc18xx_wdt->reg_clk);
+	}
+
+	lpc18xx_wdt->wdt_clk = devm_clk_get(dev, "wdtclk");
+	if (IS_ERR(lpc18xx_wdt->wdt_clk)) {
+		dev_err(dev, "failed to get the wdt clock\n");
+		return PTR_ERR(lpc18xx_wdt->wdt_clk);
+	}
+
+	ret = clk_prepare_enable(lpc18xx_wdt->reg_clk);
+	if (ret) {
+		dev_err(dev, "could not prepare or enable sys clock\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(lpc18xx_wdt->wdt_clk);
+	if (ret) {
+		dev_err(dev, "could not prepare or enable wdt clock\n");
+		goto disable_reg_clk;
+	}
+
+	/* We use the clock rate to calculate timeouts */
+	lpc18xx_wdt->clk_rate = clk_get_rate(lpc18xx_wdt->wdt_clk);
+	if (lpc18xx_wdt->clk_rate == 0) {
+		dev_err(dev, "failed to get clock rate\n");
+		ret = -EINVAL;
+		goto disable_wdt_clk;
+	}
+
+	lpc18xx_wdt->wdt_dev.info = &lpc18xx_wdt_info;
+	lpc18xx_wdt->wdt_dev.ops = &lpc18xx_wdt_ops;
+
+	lpc18xx_wdt->wdt_dev.min_timeout = DIV_ROUND_UP(LPC18XX_WDT_TC_MIN *
+				LPC18XX_WDT_CLK_DIV, lpc18xx_wdt->clk_rate);
+
+	lpc18xx_wdt->wdt_dev.max_timeout = (LPC18XX_WDT_TC_MAX *
+				LPC18XX_WDT_CLK_DIV) / lpc18xx_wdt->clk_rate;
+
+	lpc18xx_wdt->wdt_dev.timeout = min(lpc18xx_wdt->wdt_dev.max_timeout,
+					   LPC18XX_WDT_DEF_TIMEOUT);
+
+	spin_lock_init(&lpc18xx_wdt->lock);
+
+	lpc18xx_wdt->wdt_dev.parent = dev;
+	watchdog_set_drvdata(&lpc18xx_wdt->wdt_dev, lpc18xx_wdt);
+
+	ret = watchdog_init_timeout(&lpc18xx_wdt->wdt_dev, heartbeat, dev);
+
+	__lpc18xx_wdt_set_timeout(lpc18xx_wdt);
+
+	setup_timer(&lpc18xx_wdt->timer, lpc18xx_wdt_timer_feed,
+		    (unsigned long)&lpc18xx_wdt->wdt_dev);
+
+	watchdog_set_nowayout(&lpc18xx_wdt->wdt_dev, nowayout);
+
+	platform_set_drvdata(pdev, lpc18xx_wdt);
+
+	ret = watchdog_register_device(&lpc18xx_wdt->wdt_dev);
+	if (ret)
+		goto disable_wdt_clk;
+
+	lpc18xx_wdt->restart_handler.notifier_call = lpc18xx_wdt_restart;
+	lpc18xx_wdt->restart_handler.priority = 128;
+	ret = register_restart_handler(&lpc18xx_wdt->restart_handler);
+	if (ret)
+		dev_warn(dev, "failed to register restart handler: %d\n", ret);
+
+	return 0;
+
+disable_wdt_clk:
+	clk_disable_unprepare(lpc18xx_wdt->wdt_clk);
+disable_reg_clk:
+	clk_disable_unprepare(lpc18xx_wdt->reg_clk);
+	return ret;
+}
+
+static void lpc18xx_wdt_shutdown(struct platform_device *pdev)
+{
+	struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev);
+
+	lpc18xx_wdt_stop(&lpc18xx_wdt->wdt_dev);
+}
+
+static int lpc18xx_wdt_remove(struct platform_device *pdev)
+{
+	struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev);
+
+	unregister_restart_handler(&lpc18xx_wdt->restart_handler);
+
+	dev_warn(&pdev->dev, "I quit now, hardware will probably reboot!\n");
+	del_timer(&lpc18xx_wdt->timer);
+
+	watchdog_unregister_device(&lpc18xx_wdt->wdt_dev);
+	clk_disable_unprepare(lpc18xx_wdt->wdt_clk);
+	clk_disable_unprepare(lpc18xx_wdt->reg_clk);
+
+	return 0;
+}
+
+static const struct of_device_id lpc18xx_wdt_match[] = {
+	{ .compatible = "nxp,lpc1850-wwdt" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_wdt_match);
+
+static struct platform_driver lpc18xx_wdt_driver = {
+	.driver = {
+		.name = "lpc18xx-wdt",
+		.of_match_table	= lpc18xx_wdt_match,
+	},
+	.probe = lpc18xx_wdt_probe,
+	.remove = lpc18xx_wdt_remove,
+	.shutdown = lpc18xx_wdt_shutdown,
+};
+module_platform_driver(lpc18xx_wdt_driver);
+
+MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>");
+MODULE_DESCRIPTION("NXP LPC18xx Watchdog Timer Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c
index d193a5e..098fa9c 100644
--- a/drivers/watchdog/mena21_wdt.c
+++ b/drivers/watchdog/mena21_wdt.c
@@ -197,6 +197,7 @@
 	watchdog_init_timeout(&a21_wdt, 30, &pdev->dev);
 	watchdog_set_nowayout(&a21_wdt, nowayout);
 	watchdog_set_drvdata(&a21_wdt, drv);
+	a21_wdt.parent = &pdev->dev;
 
 	reset = a21_wdt_get_bootstatus(drv);
 	if (reset == 2)
@@ -252,6 +253,7 @@
 	{ .compatible = "men,a021-wdt" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, a21_wdt_ids);
 
 static struct platform_driver a21_wdt_driver = {
 	.probe = a21_wdt_probe,
diff --git a/drivers/watchdog/menf21bmc_wdt.c b/drivers/watchdog/menf21bmc_wdt.c
index 59f0913..3aefdde 100644
--- a/drivers/watchdog/menf21bmc_wdt.c
+++ b/drivers/watchdog/menf21bmc_wdt.c
@@ -130,6 +130,7 @@
 	drv_data->wdt.info = &menf21bmc_wdt_info;
 	drv_data->wdt.min_timeout = BMC_WD_TIMEOUT_MIN;
 	drv_data->wdt.max_timeout = BMC_WD_TIMEOUT_MAX;
+	drv_data->wdt.parent = &pdev->dev;
 	drv_data->i2c_client = i2c_client;
 
 	/*
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 2789da2..60b0605 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -168,6 +168,7 @@
 	{ .compatible = "moxa,moxart-watchdog" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, moxart_watchdog_match);
 
 static struct platform_driver moxart_wdt_driver = {
 	.probe      = moxart_wdt_probe,
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 689381a..5f2273a 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -50,8 +50,12 @@
 	bool hw_enabled;
 };
 
-static struct mpc8xxx_wdt __iomem *wd_base;
-static int mpc8xxx_wdt_init_late(void);
+struct mpc8xxx_wdt_ddata {
+	struct mpc8xxx_wdt __iomem *base;
+	struct watchdog_device wdd;
+	struct timer_list timer;
+	spinlock_t lock;
+};
 
 static u16 timeout = 0xffff;
 module_param(timeout, ushort, 0);
@@ -68,65 +72,59 @@
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
 		 "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-/*
- * We always prescale, but if someone really doesn't want to they can set this
- * to 0
- */
-static int prescale = 1;
-
-static DEFINE_SPINLOCK(wdt_spinlock);
-
-static void mpc8xxx_wdt_keepalive(void)
+static void mpc8xxx_wdt_keepalive(struct mpc8xxx_wdt_ddata *ddata)
 {
 	/* Ping the WDT */
-	spin_lock(&wdt_spinlock);
-	out_be16(&wd_base->swsrr, 0x556c);
-	out_be16(&wd_base->swsrr, 0xaa39);
-	spin_unlock(&wdt_spinlock);
+	spin_lock(&ddata->lock);
+	out_be16(&ddata->base->swsrr, 0x556c);
+	out_be16(&ddata->base->swsrr, 0xaa39);
+	spin_unlock(&ddata->lock);
 }
 
-static struct watchdog_device mpc8xxx_wdt_dev;
-static void mpc8xxx_wdt_timer_ping(unsigned long arg);
-static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0,
-		(unsigned long)&mpc8xxx_wdt_dev);
-
 static void mpc8xxx_wdt_timer_ping(unsigned long arg)
 {
-	struct watchdog_device *w = (struct watchdog_device *)arg;
+	struct mpc8xxx_wdt_ddata *ddata = (void *)arg;
 
-	mpc8xxx_wdt_keepalive();
+	mpc8xxx_wdt_keepalive(ddata);
 	/* We're pinging it twice faster than needed, just to be sure. */
-	mod_timer(&wdt_timer, jiffies + HZ * w->timeout / 2);
+	mod_timer(&ddata->timer, jiffies + HZ * ddata->wdd.timeout / 2);
 }
 
 static int mpc8xxx_wdt_start(struct watchdog_device *w)
 {
-	u32 tmp = SWCRR_SWEN;
+	struct mpc8xxx_wdt_ddata *ddata =
+		container_of(w, struct mpc8xxx_wdt_ddata, wdd);
+
+	u32 tmp = SWCRR_SWEN | SWCRR_SWPR;
 
 	/* Good, fire up the show */
-	if (prescale)
-		tmp |= SWCRR_SWPR;
 	if (reset)
 		tmp |= SWCRR_SWRI;
 
 	tmp |= timeout << 16;
 
-	out_be32(&wd_base->swcrr, tmp);
+	out_be32(&ddata->base->swcrr, tmp);
 
-	del_timer_sync(&wdt_timer);
+	del_timer_sync(&ddata->timer);
 
 	return 0;
 }
 
 static int mpc8xxx_wdt_ping(struct watchdog_device *w)
 {
-	mpc8xxx_wdt_keepalive();
+	struct mpc8xxx_wdt_ddata *ddata =
+		container_of(w, struct mpc8xxx_wdt_ddata, wdd);
+
+	mpc8xxx_wdt_keepalive(ddata);
 	return 0;
 }
 
 static int mpc8xxx_wdt_stop(struct watchdog_device *w)
 {
-	mod_timer(&wdt_timer, jiffies);
+	struct mpc8xxx_wdt_ddata *ddata =
+		container_of(w, struct mpc8xxx_wdt_ddata, wdd);
+
+	mod_timer(&ddata->timer, jiffies);
 	return 0;
 }
 
@@ -143,53 +141,57 @@
 	.stop = mpc8xxx_wdt_stop,
 };
 
-static struct watchdog_device mpc8xxx_wdt_dev = {
-	.info = &mpc8xxx_wdt_info,
-	.ops = &mpc8xxx_wdt_ops,
-};
-
-static const struct of_device_id mpc8xxx_wdt_match[];
 static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
 {
 	int ret;
-	const struct of_device_id *match;
-	struct device_node *np = ofdev->dev.of_node;
+	struct resource *res;
 	const struct mpc8xxx_wdt_type *wdt_type;
+	struct mpc8xxx_wdt_ddata *ddata;
 	u32 freq = fsl_get_sys_freq();
 	bool enabled;
 	unsigned int timeout_sec;
 
-	match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev);
-	if (!match)
+	wdt_type = of_device_get_match_data(&ofdev->dev);
+	if (!wdt_type)
 		return -EINVAL;
-	wdt_type = match->data;
 
 	if (!freq || freq == -1)
 		return -EINVAL;
 
-	wd_base = of_iomap(np, 0);
-	if (!wd_base)
+	ddata = devm_kzalloc(&ofdev->dev, sizeof(*ddata), GFP_KERNEL);
+	if (!ddata)
 		return -ENOMEM;
 
-	enabled = in_be32(&wd_base->swcrr) & SWCRR_SWEN;
+	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+	ddata->base = devm_ioremap_resource(&ofdev->dev, res);
+	if (IS_ERR(ddata->base))
+		return PTR_ERR(ddata->base);
+
+	enabled = in_be32(&ddata->base->swcrr) & SWCRR_SWEN;
 	if (!enabled && wdt_type->hw_enabled) {
 		pr_info("could not be enabled in software\n");
-		ret = -ENOSYS;
-		goto err_unmap;
+		return -ENODEV;
 	}
 
-	/* Calculate the timeout in seconds */
-	if (prescale)
-		timeout_sec = (timeout * wdt_type->prescaler) / freq;
-	else
-		timeout_sec = timeout / freq;
+	spin_lock_init(&ddata->lock);
+	setup_timer(&ddata->timer, mpc8xxx_wdt_timer_ping,
+		    (unsigned long)ddata);
 
-	mpc8xxx_wdt_dev.timeout = timeout_sec;
-#ifdef MODULE
-	ret = mpc8xxx_wdt_init_late();
-	if (ret)
-		goto err_unmap;
-#endif
+	ddata->wdd.info = &mpc8xxx_wdt_info;
+	ddata->wdd.ops = &mpc8xxx_wdt_ops;
+
+	/* Calculate the timeout in seconds */
+	timeout_sec = (timeout * wdt_type->prescaler) / freq;
+
+	ddata->wdd.timeout = timeout_sec;
+
+	watchdog_set_nowayout(&ddata->wdd, nowayout);
+
+	ret = watchdog_register_device(&ddata->wdd);
+	if (ret) {
+		pr_err("cannot register watchdog device (err=%d)\n", ret);
+		return ret;
+	}
 
 	pr_info("WDT driver for MPC8xxx initialized. mode:%s timeout=%d (%d seconds)\n",
 		reset ? "reset" : "interrupt", timeout, timeout_sec);
@@ -200,21 +202,20 @@
 	 * userspace handles it.
 	 */
 	if (enabled)
-		mod_timer(&wdt_timer, jiffies);
+		mod_timer(&ddata->timer, jiffies);
+
+	platform_set_drvdata(ofdev, ddata);
 	return 0;
-err_unmap:
-	iounmap(wd_base);
-	wd_base = NULL;
-	return ret;
 }
 
 static int mpc8xxx_wdt_remove(struct platform_device *ofdev)
 {
+	struct mpc8xxx_wdt_ddata *ddata = platform_get_drvdata(ofdev);
+
 	pr_crit("Watchdog removed, expect the %s soon!\n",
 		reset ? "reset" : "machine check exception");
-	del_timer_sync(&wdt_timer);
-	watchdog_unregister_device(&mpc8xxx_wdt_dev);
-	iounmap(wd_base);
+	del_timer_sync(&ddata->timer);
+	watchdog_unregister_device(&ddata->wdd);
 
 	return 0;
 }
@@ -253,31 +254,6 @@
 	},
 };
 
-/*
- * We do wdt initialization in two steps: arch_initcall probes the wdt
- * very early to start pinging the watchdog (misc devices are not yet
- * available), and later module_init() just registers the misc device.
- */
-static int mpc8xxx_wdt_init_late(void)
-{
-	int ret;
-
-	if (!wd_base)
-		return -ENODEV;
-
-	watchdog_set_nowayout(&mpc8xxx_wdt_dev, nowayout);
-
-	ret = watchdog_register_device(&mpc8xxx_wdt_dev);
-	if (ret) {
-		pr_err("cannot register watchdog device (err=%d)\n", ret);
-		return ret;
-	}
-	return 0;
-}
-#ifndef MODULE
-module_init(mpc8xxx_wdt_init_late);
-#endif
-
 static int __init mpc8xxx_wdt_init(void)
 {
 	return platform_driver_register(&mpc8xxx_wdt_driver);
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 938b987..6ad9df9 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -210,6 +210,14 @@
 	return 0;
 }
 
+static void mtk_wdt_shutdown(struct platform_device *pdev)
+{
+	struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
+
+	if (watchdog_active(&mtk_wdt->wdt_dev))
+		mtk_wdt_stop(&mtk_wdt->wdt_dev);
+}
+
 static int mtk_wdt_remove(struct platform_device *pdev)
 {
 	struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
@@ -221,17 +229,48 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int mtk_wdt_suspend(struct device *dev)
+{
+	struct mtk_wdt_dev *mtk_wdt = dev_get_drvdata(dev);
+
+	if (watchdog_active(&mtk_wdt->wdt_dev))
+		mtk_wdt_stop(&mtk_wdt->wdt_dev);
+
+	return 0;
+}
+
+static int mtk_wdt_resume(struct device *dev)
+{
+	struct mtk_wdt_dev *mtk_wdt = dev_get_drvdata(dev);
+
+	if (watchdog_active(&mtk_wdt->wdt_dev)) {
+		mtk_wdt_start(&mtk_wdt->wdt_dev);
+		mtk_wdt_ping(&mtk_wdt->wdt_dev);
+	}
+
+	return 0;
+}
+#endif
+
 static const struct of_device_id mtk_wdt_dt_ids[] = {
 	{ .compatible = "mediatek,mt6589-wdt" },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
 
+static const struct dev_pm_ops mtk_wdt_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mtk_wdt_suspend,
+				mtk_wdt_resume)
+};
+
 static struct platform_driver mtk_wdt_driver = {
 	.probe		= mtk_wdt_probe,
 	.remove		= mtk_wdt_remove,
+	.shutdown	= mtk_wdt_shutdown,
 	.driver		= {
 		.name		= DRV_NAME,
+		.pm		= &mtk_wdt_pm_ops,
 		.of_match_table	= mtk_wdt_dt_ids,
 	},
 };
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index c028454..bd917bb 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -294,6 +294,8 @@
 	  PCI_ANY_ID, PCI_ANY_ID, },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
 	  PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS,
+	  PCI_ANY_ID, PCI_ANY_ID, },
 	{ 0, },			/* End of list */
 };
 MODULE_DEVICE_TABLE(pci, tco_pci_tbl);
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index de911c7..d96bee0 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -253,6 +253,7 @@
 	wdev->wdog.ops = &omap_wdt_ops;
 	wdev->wdog.min_timeout = TIMER_MARGIN_MIN;
 	wdev->wdog.max_timeout = TIMER_MARGIN_MAX;
+	wdev->wdog.parent = &pdev->dev;
 
 	if (watchdog_init_timeout(&wdev->wdog, timer_margin, &pdev->dev) < 0)
 		wdev->wdog.timeout = TIMER_MARGIN_DEFAULT;
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index ef0c628..c6b8f4a 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -567,6 +567,7 @@
 
 	dev->wdt.timeout = wdt_max_duration;
 	dev->wdt.max_timeout = wdt_max_duration;
+	dev->wdt.parent = &pdev->dev;
 	watchdog_init_timeout(&dev->wdt, heartbeat, &pdev->dev);
 
 	platform_set_drvdata(pdev, &dev->wdt);
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index b9c6049..4224b3e 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -167,6 +167,7 @@
 
 	pnx4008_wdd.bootstatus = (readl(WDTIM_RES(wdt_base)) & WDOG_RESET) ?
 			WDIOF_CARDRESET : 0;
+	pnx4008_wdd.parent = &pdev->dev;
 	watchdog_set_nowayout(&pnx4008_wdd, nowayout);
 
 	pnx4008_wdt_stop(&pnx4008_wdd);	/* disable for now */
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index aa03ca8..773dcfa 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -171,6 +171,7 @@
 	wdt->wdd.ops = &qcom_wdt_ops;
 	wdt->wdd.min_timeout = 1;
 	wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
+	wdt->wdd.parent = &pdev->dev;
 
 	/*
 	 * If 'timeout-sec' unspecified in devicetree, assume a 30 second
diff --git a/drivers/watchdog/retu_wdt.c b/drivers/watchdog/retu_wdt.c
index b7c68e27..39cd51d 100644
--- a/drivers/watchdog/retu_wdt.c
+++ b/drivers/watchdog/retu_wdt.c
@@ -127,6 +127,7 @@
 	retu_wdt->timeout	= RETU_WDT_MAX_TIMER;
 	retu_wdt->min_timeout	= 0;
 	retu_wdt->max_timeout	= RETU_WDT_MAX_TIMER;
+	retu_wdt->parent	= &pdev->dev;
 
 	watchdog_set_drvdata(retu_wdt, wdev);
 	watchdog_set_nowayout(retu_wdt, nowayout);
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index a6f7e2e..1967919 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -161,6 +161,7 @@
 	rt288x_wdt_dev.dev = &pdev->dev;
 	rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause();
 	rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq);
+	rt288x_wdt_dev.parent = &pdev->dev;
 
 	watchdog_init_timeout(&rt288x_wdt_dev, rt288x_wdt_dev.max_timeout,
 			      &pdev->dev);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index e89ae02..d781000 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -607,6 +607,7 @@
 	watchdog_set_nowayout(&wdt->wdt_device, nowayout);
 
 	wdt->wdt_device.bootstatus = s3c2410wdt_get_bootstatus(wdt);
+	wdt->wdt_device.parent = &pdev->dev;
 
 	ret = watchdog_register_device(&wdt->wdt_device);
 	if (ret) {
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
new file mode 100644
index 0000000..a49634c
--- /dev/null
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -0,0 +1,280 @@
+/*
+ * Driver for Atmel SAMA5D4 Watchdog Timer
+ *
+ * Copyright (C) 2015 Atmel Corporation
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+#include "at91sam9_wdt.h"
+
+/* minimum and maximum watchdog timeout, in seconds */
+#define MIN_WDT_TIMEOUT		1
+#define MAX_WDT_TIMEOUT		16
+#define WDT_DEFAULT_TIMEOUT	MAX_WDT_TIMEOUT
+
+#define WDT_SEC2TICKS(s)	((s) ? (((s) << 8) - 1) : 0)
+
+struct sama5d4_wdt {
+	struct watchdog_device	wdd;
+	void __iomem		*reg_base;
+	u32	config;
+};
+
+static int wdt_timeout = WDT_DEFAULT_TIMEOUT;
+static bool nowayout = WATCHDOG_NOWAYOUT;
+
+module_param(wdt_timeout, int, 0);
+MODULE_PARM_DESC(wdt_timeout,
+	"Watchdog timeout in seconds. (default = "
+	__MODULE_STRING(WDT_DEFAULT_TIMEOUT) ")");
+
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+	"Watchdog cannot be stopped once started (default="
+	__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+#define wdt_read(wdt, field) \
+	readl_relaxed((wdt)->reg_base + (field))
+
+#define wdt_write(wdt, field, val) \
+	writel_relaxed((val), (wdt)->reg_base + (field))
+
+static int sama5d4_wdt_start(struct watchdog_device *wdd)
+{
+	struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
+	u32 reg;
+
+	reg = wdt_read(wdt, AT91_WDT_MR);
+	reg &= ~AT91_WDT_WDDIS;
+	wdt_write(wdt, AT91_WDT_MR, reg);
+
+	return 0;
+}
+
+static int sama5d4_wdt_stop(struct watchdog_device *wdd)
+{
+	struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
+	u32 reg;
+
+	reg = wdt_read(wdt, AT91_WDT_MR);
+	reg |= AT91_WDT_WDDIS;
+	wdt_write(wdt, AT91_WDT_MR, reg);
+
+	return 0;
+}
+
+static int sama5d4_wdt_ping(struct watchdog_device *wdd)
+{
+	struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
+
+	wdt_write(wdt, AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
+
+	return 0;
+}
+
+static int sama5d4_wdt_set_timeout(struct watchdog_device *wdd,
+				 unsigned int timeout)
+{
+	struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
+	u32 value = WDT_SEC2TICKS(timeout);
+	u32 reg;
+
+	reg = wdt_read(wdt, AT91_WDT_MR);
+	reg &= ~AT91_WDT_WDV;
+	reg &= ~AT91_WDT_WDD;
+	reg |= AT91_WDT_SET_WDV(value);
+	reg |= AT91_WDT_SET_WDD(value);
+	wdt_write(wdt, AT91_WDT_MR, reg);
+
+	wdd->timeout = timeout;
+
+	return 0;
+}
+
+static const struct watchdog_info sama5d4_wdt_info = {
+	.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+	.identity = "Atmel SAMA5D4 Watchdog",
+};
+
+static struct watchdog_ops sama5d4_wdt_ops = {
+	.owner = THIS_MODULE,
+	.start = sama5d4_wdt_start,
+	.stop = sama5d4_wdt_stop,
+	.ping = sama5d4_wdt_ping,
+	.set_timeout = sama5d4_wdt_set_timeout,
+};
+
+static irqreturn_t sama5d4_wdt_irq_handler(int irq, void *dev_id)
+{
+	struct sama5d4_wdt *wdt = platform_get_drvdata(dev_id);
+
+	if (wdt_read(wdt, AT91_WDT_SR)) {
+		pr_crit("Atmel Watchdog Software Reset\n");
+		emergency_restart();
+		pr_crit("Reboot didn't succeed\n");
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt)
+{
+	const char *tmp;
+
+	wdt->config = AT91_WDT_WDDIS;
+
+	if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) &&
+	    !strcmp(tmp, "software"))
+		wdt->config |= AT91_WDT_WDFIEN;
+	else
+		wdt->config |= AT91_WDT_WDRSTEN;
+
+	if (of_property_read_bool(np, "atmel,idle-halt"))
+		wdt->config |= AT91_WDT_WDIDLEHLT;
+
+	if (of_property_read_bool(np, "atmel,dbg-halt"))
+		wdt->config |= AT91_WDT_WDDBGHLT;
+
+	return 0;
+}
+
+static int sama5d4_wdt_init(struct sama5d4_wdt *wdt)
+{
+	struct watchdog_device *wdd = &wdt->wdd;
+	u32 value = WDT_SEC2TICKS(wdd->timeout);
+	u32 reg;
+
+	/*
+	 * The fields WDV and WDD must not be modified while the WDDIS bit is
+	 * set, so clear the WDDIS bit before writing the WDT_MR register.
+	 */
+	reg = wdt_read(wdt, AT91_WDT_MR);
+	reg &= ~AT91_WDT_WDDIS;
+	wdt_write(wdt, AT91_WDT_MR, reg);
+
+	reg = wdt->config;
+	reg |= AT91_WDT_SET_WDD(value);
+	reg |= AT91_WDT_SET_WDV(value);
+
+	wdt_write(wdt, AT91_WDT_MR, reg);
+
+	return 0;
+}
+
+static int sama5d4_wdt_probe(struct platform_device *pdev)
+{
+	struct watchdog_device *wdd;
+	struct sama5d4_wdt *wdt;
+	struct resource *res;
+	void __iomem *regs;
+	u32 irq = 0;
+	int ret;
+
+	wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+	if (!wdt)
+		return -ENOMEM;
+
+	wdd = &wdt->wdd;
+	wdd->timeout = wdt_timeout;
+	wdd->info = &sama5d4_wdt_info;
+	wdd->ops = &sama5d4_wdt_ops;
+	wdd->min_timeout = MIN_WDT_TIMEOUT;
+	wdd->max_timeout = MAX_WDT_TIMEOUT;
+
+	watchdog_set_drvdata(wdd, wdt);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
+
+	wdt->reg_base = regs;
+
+	if (pdev->dev.of_node) {
+		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+		if (!irq)
+			dev_warn(&pdev->dev, "failed to get IRQ from DT\n");
+
+		ret = of_sama5d4_wdt_init(pdev->dev.of_node, wdt);
+		if (ret)
+			return ret;
+	}
+
+	if ((wdt->config & AT91_WDT_WDFIEN) && irq) {
+		ret = devm_request_irq(&pdev->dev, irq, sama5d4_wdt_irq_handler,
+				       IRQF_SHARED | IRQF_IRQPOLL |
+				       IRQF_NO_SUSPEND, pdev->name, pdev);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"cannot register interrupt handler\n");
+			return ret;
+		}
+	}
+
+	ret = watchdog_init_timeout(wdd, wdt_timeout, &pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to set timeout value\n");
+		return ret;
+	}
+
+	ret = sama5d4_wdt_init(wdt);
+	if (ret)
+		return ret;
+
+	watchdog_set_nowayout(wdd, nowayout);
+
+	ret = watchdog_register_device(wdd);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register watchdog device\n");
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, wdt);
+
+	dev_info(&pdev->dev, "initialized (timeout = %d sec, nowayout = %d)\n",
+		 wdt_timeout, nowayout);
+
+	return 0;
+}
+
+static int sama5d4_wdt_remove(struct platform_device *pdev)
+{
+	struct sama5d4_wdt *wdt = platform_get_drvdata(pdev);
+
+	sama5d4_wdt_stop(&wdt->wdd);
+
+	watchdog_unregister_device(&wdt->wdd);
+
+	return 0;
+}
+
+static const struct of_device_id sama5d4_wdt_of_match[] = {
+	{ .compatible = "atmel,sama5d4-wdt", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sama5d4_wdt_of_match);
+
+static struct platform_driver sama5d4_wdt_driver = {
+	.probe		= sama5d4_wdt_probe,
+	.remove		= sama5d4_wdt_remove,
+	.driver		= {
+		.name	= "sama5d4_wdt",
+		.of_match_table = sama5d4_wdt_of_match,
+	}
+};
+module_platform_driver(sama5d4_wdt_driver);
+
+MODULE_AUTHOR("Atmel Corporation");
+MODULE_DESCRIPTION("Atmel SAMA5D4 Watchdog Timer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 567458b..f908121 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -252,6 +252,7 @@
 
 	watchdog_set_nowayout(&sh_wdt_dev, nowayout);
 	watchdog_set_drvdata(&sh_wdt_dev, wdt);
+	sh_wdt_dev.parent = &pdev->dev;
 
 	spin_lock_init(&wdt->lock);
 
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
index 42fa5c0c5..d0578ab 100644
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -154,6 +154,7 @@
 
 	watchdog_init_timeout(&sirfsoc_wdd, timeout, &pdev->dev);
 	watchdog_set_nowayout(&sirfsoc_wdd, nowayout);
+	sirfsoc_wdd.parent = &pdev->dev;
 
 	ret = watchdog_register_device(&sirfsoc_wdd);
 	if (ret)
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 4e7fec3..01d8162 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -226,6 +226,7 @@
 	wdt->adev = adev;
 	wdt->wdd.info = &wdt_info;
 	wdt->wdd.ops = &wdt_ops;
+	wdt->wdd.parent = &adev->dev;
 
 	spin_lock_init(&wdt->lock);
 	watchdog_set_nowayout(&wdt->wdd, nowayout);
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index 6785afd..14e9bad 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -241,6 +241,7 @@
 		return -EINVAL;
 	}
 	st_wdog_dev.max_timeout = 0xFFFFFFFF / st_wdog->clkrate;
+	st_wdog_dev.parent = &pdev->dev;
 
 	ret = clk_prepare_enable(clk);
 	if (ret) {
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index e7f0d5b..3ee6128 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -76,6 +76,7 @@
 	watchdog_set_drvdata(&stmp3xxx_wdd, &pdev->dev);
 
 	stmp3xxx_wdd.timeout = clamp_t(unsigned, heartbeat, 1, STMP3XXX_MAX_TIMEOUT);
+	stmp3xxx_wdd.parent = &pdev->dev;
 
 	ret = watchdog_register_device(&stmp3xxx_wdd);
 	if (ret < 0) {
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index a29afb3..47bd8a1 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -184,7 +184,7 @@
 	/* Set system reset function */
 	reg = readl(wdt_base + regs->wdt_cfg);
 	reg &= ~(regs->wdt_reset_mask);
-	reg |= ~(regs->wdt_reset_val);
+	reg |= regs->wdt_reset_val;
 	writel(reg, wdt_base + regs->wdt_cfg);
 
 	/* Enable watchdog */
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index 30451ea..7f97cdd 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -218,6 +218,7 @@
 	wdd->ops = &tegra_wdt_ops;
 	wdd->min_timeout = MIN_WDT_TIMEOUT;
 	wdd->max_timeout = MAX_WDT_TIMEOUT;
+	wdd->parent = &pdev->dev;
 
 	watchdog_set_drvdata(wdd, wdt);
 
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 2c1db6f..9bf3cc0 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -83,6 +83,7 @@
 	wdt->timeout		= 30;
 	wdt->min_timeout	= 1;
 	wdt->max_timeout	= 30;
+	wdt->parent = &pdev->dev;
 
 	watchdog_set_nowayout(wdt, nowayout);
 	platform_set_drvdata(pdev, wdt);
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 7f61593..c2da880 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -131,6 +131,7 @@
 	txx9wdt.timeout = timeout;
 	txx9wdt.min_timeout = 1;
 	txx9wdt.max_timeout = WD_MAX_TIMEOUT;
+	txx9wdt.parent = &dev->dev;
 	watchdog_set_nowayout(&txx9wdt, nowayout);
 
 	ret = watchdog_register_device(&txx9wdt);
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
index 9de09ab..37c0843 100644
--- a/drivers/watchdog/ux500_wdt.c
+++ b/drivers/watchdog/ux500_wdt.c
@@ -96,6 +96,7 @@
 			ux500_wdt.max_timeout = WATCHDOG_MAX28;
 	}
 
+	ux500_wdt.parent = &pdev->dev;
 	watchdog_set_nowayout(&ux500_wdt, nowayout);
 
 	/* disable auto off on sleep */
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index 56369c4..5f9cbc3 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -206,6 +206,7 @@
 		timeout = WDT_TIMEOUT;
 
 	wdt_dev.timeout = timeout;
+	wdt_dev.parent = &pdev->dev;
 	watchdog_set_nowayout(&wdt_dev, nowayout);
 	if (readl(wdt_mem) & VIA_WDT_FIRED)
 		wdt_dev.bootstatus |= WDIOF_CARDRESET;
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 2fa17e7..8d1184a 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -215,6 +215,7 @@
 
 	wm831x_wdt->info = &wm831x_wdt_info;
 	wm831x_wdt->ops = &wm831x_wdt_ops;
+	wm831x_wdt->parent = &pdev->dev;
 	watchdog_set_nowayout(wm831x_wdt, nowayout);
 	watchdog_set_drvdata(wm831x_wdt, driver_data);
 
diff --git a/drivers/watchdog/wm8350_wdt.c b/drivers/watchdog/wm8350_wdt.c
index 34d272a..4ab4b83 100644
--- a/drivers/watchdog/wm8350_wdt.c
+++ b/drivers/watchdog/wm8350_wdt.c
@@ -151,6 +151,7 @@
 
 	watchdog_set_nowayout(&wm8350_wdt, nowayout);
 	watchdog_set_drvdata(&wm8350_wdt, wm8350);
+	wm8350_wdt.parent = &pdev->dev;
 
 	/* Default to 4s timeout */
 	wm8350_wdt_set_timeout(&wm8350_wdt, 4);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 22ea424..073bb57 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1242,6 +1242,13 @@
 				goto out_clear;
 			}
 			bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
+			/*
+			 * If the partition is not aligned on a page
+			 * boundary, we can't do dax I/O to it.
+			 */
+			if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512)) ||
+			    (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
+				bdev->bd_inode->i_flags &= ~S_DAX;
 		}
 	} else {
 		if (bdev->bd_contains == bdev) {
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 1ce06c84..3e36e4a 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -42,8 +42,14 @@
 
 	/* Thresholding related variants */
 	atomic_t pending;
-	int max_active;
-	int current_max;
+
+	/* Upper limit on the number of concurrent workers */
+	int limit_active;
+
+	/* Current number of concurrent workers */
+	int current_active;
+
+	/* Threshold to change current_active */
 	int thresh;
 	unsigned int count;
 	spinlock_t thres_lock;
@@ -88,7 +94,7 @@
 BTRFS_WORK_HELPER(scrubparity_helper);
 
 static struct __btrfs_workqueue *
-__btrfs_alloc_workqueue(const char *name, unsigned int flags, int max_active,
+__btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
 			 int thresh)
 {
 	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
@@ -96,26 +102,31 @@
 	if (!ret)
 		return NULL;
 
-	ret->max_active = max_active;
+	ret->limit_active = limit_active;
 	atomic_set(&ret->pending, 0);
 	if (thresh == 0)
 		thresh = DFT_THRESHOLD;
 	/* For low threshold, disabling threshold is a better choice */
 	if (thresh < DFT_THRESHOLD) {
-		ret->current_max = max_active;
+		ret->current_active = limit_active;
 		ret->thresh = NO_THRESHOLD;
 	} else {
-		ret->current_max = 1;
+		/*
+		 * For threshold-able wq, let its concurrency grow on demand.
+		 * Use minimal max_active at alloc time to reduce resource
+		 * usage.
+		 */
+		ret->current_active = 1;
 		ret->thresh = thresh;
 	}
 
 	if (flags & WQ_HIGHPRI)
 		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
-						 ret->max_active,
-						 "btrfs", name);
+						 ret->current_active, "btrfs",
+						 name);
 	else
 		ret->normal_wq = alloc_workqueue("%s-%s", flags,
-						 ret->max_active, "btrfs",
+						 ret->current_active, "btrfs",
 						 name);
 	if (!ret->normal_wq) {
 		kfree(ret);
@@ -134,7 +145,7 @@
 
 struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
 					      unsigned int flags,
-					      int max_active,
+					      int limit_active,
 					      int thresh)
 {
 	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
@@ -143,14 +154,14 @@
 		return NULL;
 
 	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
-					      max_active, thresh);
+					      limit_active, thresh);
 	if (!ret->normal) {
 		kfree(ret);
 		return NULL;
 	}
 
 	if (flags & WQ_HIGHPRI) {
-		ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
+		ret->high = __btrfs_alloc_workqueue(name, flags, limit_active,
 						    thresh);
 		if (!ret->high) {
 			__btrfs_destroy_workqueue(ret->normal);
@@ -180,7 +191,7 @@
  */
 static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
 {
-	int new_max_active;
+	int new_current_active;
 	long pending;
 	int need_change = 0;
 
@@ -197,7 +208,7 @@
 	wq->count %= (wq->thresh / 4);
 	if (!wq->count)
 		goto  out;
-	new_max_active = wq->current_max;
+	new_current_active = wq->current_active;
 
 	/*
 	 * pending may be changed later, but it's OK since we really
@@ -205,19 +216,19 @@
 	 */
 	pending = atomic_read(&wq->pending);
 	if (pending > wq->thresh)
-		new_max_active++;
+		new_current_active++;
 	if (pending < wq->thresh / 2)
-		new_max_active--;
-	new_max_active = clamp_val(new_max_active, 1, wq->max_active);
-	if (new_max_active != wq->current_max)  {
+		new_current_active--;
+	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
+	if (new_current_active != wq->current_active)  {
 		need_change = 1;
-		wq->current_max = new_max_active;
+		wq->current_active = new_current_active;
 	}
 out:
 	spin_unlock(&wq->thres_lock);
 
 	if (need_change) {
-		workqueue_set_max_active(wq->normal_wq, wq->current_max);
+		workqueue_set_max_active(wq->normal_wq, wq->current_active);
 	}
 }
 
@@ -351,13 +362,13 @@
 	kfree(wq);
 }
 
-void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
+void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
 {
 	if (!wq)
 		return;
-	wq->normal->max_active = max;
+	wq->normal->limit_active = limit_active;
 	if (wq->high)
-		wq->high->max_active = max;
+		wq->high->limit_active = limit_active;
 }
 
 void btrfs_set_work_high_priority(struct btrfs_work *work)
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index b0b093b..ad4d064 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -69,7 +69,7 @@
 
 struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
 					      unsigned int flags,
-					      int max_active,
+					      int limit_active,
 					      int thresh);
 void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
 		     btrfs_func_t func,
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 81220b2..0ef5cc1 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -44,8 +44,6 @@
 #define BTRFS_INODE_IN_DELALLOC_LIST		9
 #define BTRFS_INODE_READDIO_NEED_LOCK		10
 #define BTRFS_INODE_HAS_PROPS		        11
-/* DIO is ready to submit */
-#define BTRFS_INODE_DIO_READY		        12
 /*
  * The following 3 bits are meant only for the btree inode.
  * When any of them is set, it means an error happened while writing an
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 564a7de..e54dd59 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -183,8 +183,7 @@
 	}
 
 out:
-	if (path)
-		btrfs_free_path(path);
+	btrfs_free_path(path);
 	return ret;
 }
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9ebd34f..295795a 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3443,6 +3443,26 @@
 	return 0;
 }
 
+int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
+{
+	if ((flags & (BTRFS_BLOCK_GROUP_DUP |
+		      BTRFS_BLOCK_GROUP_RAID0 |
+		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)) ||
+	    ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0))
+		return 0;
+
+	if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+		     BTRFS_BLOCK_GROUP_RAID5 |
+		     BTRFS_BLOCK_GROUP_RAID10))
+		return 1;
+
+	if (flags & BTRFS_BLOCK_GROUP_RAID6)
+		return 2;
+
+	pr_warn("BTRFS: unknown raid type: %llu\n", flags);
+	return 0;
+}
+
 int btrfs_calc_num_tolerated_disk_barrier_failures(
 	struct btrfs_fs_info *fs_info)
 {
@@ -3452,13 +3472,12 @@
 		       BTRFS_BLOCK_GROUP_SYSTEM,
 		       BTRFS_BLOCK_GROUP_METADATA,
 		       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
-	int num_types = 4;
 	int i;
 	int c;
 	int num_tolerated_disk_barrier_failures =
 		(int)fs_info->fs_devices->num_devices;
 
-	for (i = 0; i < num_types; i++) {
+	for (i = 0; i < ARRAY_SIZE(types); i++) {
 		struct btrfs_space_info *tmp;
 
 		sinfo = NULL;
@@ -3476,44 +3495,21 @@
 
 		down_read(&sinfo->groups_sem);
 		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
-			if (!list_empty(&sinfo->block_groups[c])) {
-				u64 flags;
+			u64 flags;
 
-				btrfs_get_block_group_info(
-					&sinfo->block_groups[c], &space);
-				if (space.total_bytes == 0 ||
-				    space.used_bytes == 0)
-					continue;
-				flags = space.flags;
-				/*
-				 * return
-				 * 0: if dup, single or RAID0 is configured for
-				 *    any of metadata, system or data, else
-				 * 1: if RAID5 is configured, or if RAID1 or
-				 *    RAID10 is configured and only two mirrors
-				 *    are used, else
-				 * 2: if RAID6 is configured, else
-				 * num_mirrors - 1: if RAID1 or RAID10 is
-				 *                  configured and more than
-				 *                  2 mirrors are used.
-				 */
-				if (num_tolerated_disk_barrier_failures > 0 &&
-				    ((flags & (BTRFS_BLOCK_GROUP_DUP |
-					       BTRFS_BLOCK_GROUP_RAID0)) ||
-				     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
-				      == 0)))
-					num_tolerated_disk_barrier_failures = 0;
-				else if (num_tolerated_disk_barrier_failures > 1) {
-					if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
-					    BTRFS_BLOCK_GROUP_RAID5 |
-					    BTRFS_BLOCK_GROUP_RAID10)) {
-						num_tolerated_disk_barrier_failures = 1;
-					} else if (flags &
-						   BTRFS_BLOCK_GROUP_RAID6) {
-						num_tolerated_disk_barrier_failures = 2;
-					}
-				}
-			}
+			if (list_empty(&sinfo->block_groups[c]))
+				continue;
+
+			btrfs_get_block_group_info(&sinfo->block_groups[c],
+						   &space);
+			if (space.total_bytes == 0 || space.used_bytes == 0)
+				continue;
+			flags = space.flags;
+
+			num_tolerated_disk_barrier_failures = min(
+				num_tolerated_disk_barrier_failures,
+				btrfs_get_num_tolerated_disk_barrier_failures(
+					flags));
 		}
 		up_read(&sinfo->groups_sem);
 	}
@@ -3769,9 +3765,7 @@
 		 * block groups queued for removal, the deletion will be
 		 * skipped when we quit the cleaner thread.
 		 */
-		mutex_lock(&root->fs_info->cleaner_mutex);
 		btrfs_delete_unused_bgs(root->fs_info);
-		mutex_unlock(&root->fs_info->cleaner_mutex);
 
 		ret = btrfs_commit_super(root);
 		if (ret)
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index d4cbfee..bdfb479 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -139,6 +139,7 @@
 				     u64 objectid);
 int btree_lock_page_hook(struct page *page, void *data,
 				void (*flush_fn)(void *));
+int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
 int btrfs_calc_num_tolerated_disk_barrier_failures(
 	struct btrfs_fs_info *fs_info);
 int __init btrfs_end_io_wq_init(void);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 5411f0a..9f96042 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3742,10 +3742,7 @@
 	found->bytes_reserved = 0;
 	found->bytes_readonly = 0;
 	found->bytes_may_use = 0;
-	if (total_bytes > 0)
-		found->full = 0;
-	else
-		found->full = 1;
+	found->full = 0;
 	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
 	found->chunk_alloc = 0;
 	found->flush = 0;
@@ -8668,7 +8665,7 @@
 	}
 
 	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
-		btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
+		btrfs_add_dropped_root(trans, root);
 	} else {
 		free_extent_buffer(root->node);
 		free_extent_buffer(root->commit_root);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index f1018cf..e2357e3 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2798,7 +2798,8 @@
 			      bio_end_io_t end_io_func,
 			      int mirror_num,
 			      unsigned long prev_bio_flags,
-			      unsigned long bio_flags)
+			      unsigned long bio_flags,
+			      bool force_bio_submit)
 {
 	int ret = 0;
 	struct bio *bio;
@@ -2814,6 +2815,7 @@
 			contig = bio_end_sector(bio) == sector;
 
 		if (prev_bio_flags != bio_flags || !contig ||
+		    force_bio_submit ||
 		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
 		    bio_add_page(bio, page, page_size, offset) < page_size) {
 			ret = submit_one_bio(rw, bio, mirror_num,
@@ -2910,7 +2912,8 @@
 			 get_extent_t *get_extent,
 			 struct extent_map **em_cached,
 			 struct bio **bio, int mirror_num,
-			 unsigned long *bio_flags, int rw)
+			 unsigned long *bio_flags, int rw,
+			 u64 *prev_em_start)
 {
 	struct inode *inode = page->mapping->host;
 	u64 start = page_offset(page);
@@ -2958,6 +2961,7 @@
 	}
 	while (cur <= end) {
 		unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+		bool force_bio_submit = false;
 
 		if (cur >= last_byte) {
 			char *userpage;
@@ -3008,6 +3012,49 @@
 		block_start = em->block_start;
 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
 			block_start = EXTENT_MAP_HOLE;
+
+		/*
+		 * If we have a file range that points to a compressed extent
+		 * and it's followed by a consecutive file range that points to
+		 * the same compressed extent (possibly with a different
+		 * offset and/or length, so it either points to the whole extent
+		 * or only part of it), we must make sure we do not submit a
+		 * single bio to populate the pages for the 2 ranges because
+		 * this makes the compressed extent read zero out the pages
+		 * belonging to the 2nd range. Imagine the following scenario:
+		 *
+		 *  File layout
+		 *  [0 - 8K]                     [8K - 24K]
+		 *    |                               |
+		 *    |                               |
+		 * points to extent X,         points to extent X,
+		 * offset 4K, length of 8K     offset 0, length 16K
+		 *
+		 * [extent X, compressed length = 4K, uncompressed length = 16K]
+		 *
+		 * If the bio to read the compressed extent covers both ranges,
+		 * it will decompress extent X into the pages belonging to the
+		 * first range and then it will stop, zeroing out the remaining
+		 * pages that belong to the other range that points to extent X.
+		 * So here we make sure we submit 2 bios, one for the first
+		 * range and another one for the second range. Both will target
+		 * the same physical extent from disk, but we can't currently
+		 * make the compressed bio endio callback populate the pages
+		 * for both ranges because each compressed bio is tightly
+		 * coupled with a single extent map, and each range can have
+		 * an extent map with a different offset value relative to the
+		 * uncompressed data of our extent and different lengths. This
+		 * is a corner case, so we prioritize correctness over
+		 * performance (submitting 2 bios for the same extent).
+		 */
+		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
+		    prev_em_start && *prev_em_start != (u64)-1 &&
+		    *prev_em_start != em->orig_start)
+			force_bio_submit = true;
+
+		if (prev_em_start)
+			*prev_em_start = em->orig_start;
+
 		free_extent_map(em);
 		em = NULL;
 
@@ -3057,7 +3104,8 @@
 					 bdev, bio, pnr,
 					 end_bio_extent_readpage, mirror_num,
 					 *bio_flags,
-					 this_bio_flag);
+					 this_bio_flag,
+					 force_bio_submit);
 		if (!ret) {
 			nr++;
 			*bio_flags = this_bio_flag;
@@ -3089,6 +3137,7 @@
 	struct inode *inode;
 	struct btrfs_ordered_extent *ordered;
 	int index;
+	u64 prev_em_start = (u64)-1;
 
 	inode = pages[0]->mapping->host;
 	while (1) {
@@ -3104,7 +3153,7 @@
 
 	for (index = 0; index < nr_pages; index++) {
 		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
-			      mirror_num, bio_flags, rw);
+			      mirror_num, bio_flags, rw, &prev_em_start);
 		page_cache_release(pages[index]);
 	}
 }
@@ -3172,7 +3221,7 @@
 	}
 
 	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
-			    bio_flags, rw);
+			    bio_flags, rw, NULL);
 	return ret;
 }
 
@@ -3198,7 +3247,7 @@
 	int ret;
 
 	ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
-				      &bio_flags, READ);
+			    &bio_flags, READ, NULL);
 	if (bio)
 		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
 	return ret;
@@ -3451,7 +3500,7 @@
 						 sector, iosize, pg_offset,
 						 bdev, &epd->bio, max_nr,
 						 end_bio_extent_writepage,
-						 0, 0, 0);
+						 0, 0, 0, false);
 			if (ret)
 				SetPageError(page);
 		}
@@ -3754,7 +3803,7 @@
 		ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
 					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
 					 -1, end_bio_extent_buffer_writepage,
-					 0, epd->bio_flags, bio_flags);
+					 0, epd->bio_flags, bio_flags, false);
 		epd->bio_flags = bio_flags;
 		if (ret) {
 			set_btree_ioerr(p);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 237da01..611b66d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5084,7 +5084,8 @@
 		goto no_delete;
 	}
 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
-	btrfs_wait_ordered_range(inode, 0, (u64)-1);
+	if (!special_file(inode->i_mode))
+		btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
 	btrfs_free_io_failure_record(inode, 0, (u64)-1);
 
@@ -6909,8 +6910,7 @@
 
 	trace_btrfs_get_extent(root, em);
 
-	if (path)
-		btrfs_free_path(path);
+	btrfs_free_path(path);
 	if (trans) {
 		ret = btrfs_end_transaction(trans, root);
 		if (!err)
@@ -7409,6 +7409,10 @@
 	return em;
 }
 
+struct btrfs_dio_data {
+	u64 outstanding_extents;
+	u64 reserve;
+};
 
 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 				   struct buffer_head *bh_result, int create)
@@ -7416,10 +7420,10 @@
 	struct extent_map *em;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_state *cached_state = NULL;
+	struct btrfs_dio_data *dio_data = NULL;
 	u64 start = iblock << inode->i_blkbits;
 	u64 lockstart, lockend;
 	u64 len = bh_result->b_size;
-	u64 *outstanding_extents = NULL;
 	int unlock_bits = EXTENT_LOCKED;
 	int ret = 0;
 
@@ -7437,7 +7441,7 @@
 		 * that anything that needs to check if there's a transction doesn't get
 		 * confused.
 		 */
-		outstanding_extents = current->journal_info;
+		dio_data = current->journal_info;
 		current->journal_info = NULL;
 	}
 
@@ -7569,17 +7573,18 @@
 		 * within our reservation, otherwise we need to adjust our inode
 		 * counter appropriately.
 		 */
-		if (*outstanding_extents) {
-			(*outstanding_extents)--;
+		if (dio_data->outstanding_extents) {
+			(dio_data->outstanding_extents)--;
 		} else {
 			spin_lock(&BTRFS_I(inode)->lock);
 			BTRFS_I(inode)->outstanding_extents++;
 			spin_unlock(&BTRFS_I(inode)->lock);
 		}
 
-		current->journal_info = outstanding_extents;
 		btrfs_free_reserved_data_space(inode, len);
-		set_bit(BTRFS_INODE_DIO_READY, &BTRFS_I(inode)->runtime_flags);
+		WARN_ON(dio_data->reserve < len);
+		dio_data->reserve -= len;
+		current->journal_info = dio_data;
 	}
 
 	/*
@@ -7602,8 +7607,8 @@
 unlock_err:
 	clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 			 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
-	if (outstanding_extents)
-		current->journal_info = outstanding_extents;
+	if (dio_data)
+		current->journal_info = dio_data;
 	return ret;
 }
 
@@ -8330,7 +8335,8 @@
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
-	u64 outstanding_extents = 0;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_dio_data dio_data = { 0 };
 	size_t count = 0;
 	int flags = 0;
 	bool wakeup = true;
@@ -8368,7 +8374,7 @@
 		ret = btrfs_delalloc_reserve_space(inode, count);
 		if (ret)
 			goto out;
-		outstanding_extents = div64_u64(count +
+		dio_data.outstanding_extents = div64_u64(count +
 						BTRFS_MAX_EXTENT_SIZE - 1,
 						BTRFS_MAX_EXTENT_SIZE);
 
@@ -8377,7 +8383,8 @@
 		 * do the accounting properly if we go over the number we
 		 * originally calculated.  Abuse current->journal_info for this.
 		 */
-		current->journal_info = &outstanding_extents;
+		dio_data.reserve = round_up(count, root->sectorsize);
+		current->journal_info = &dio_data;
 	} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
 				     &BTRFS_I(inode)->runtime_flags)) {
 		inode_dio_end(inode);
@@ -8392,16 +8399,9 @@
 	if (iov_iter_rw(iter) == WRITE) {
 		current->journal_info = NULL;
 		if (ret < 0 && ret != -EIOCBQUEUED) {
-			/*
-			 * If the error comes from submitting stage,
-			 * btrfs_get_blocsk_direct() has free'd data space,
-			 * and metadata space will be handled by
-			 * finish_ordered_fn, don't do that again to make
-			 * sure bytes_may_use is correct.
-			 */
-			if (!test_and_clear_bit(BTRFS_INODE_DIO_READY,
-				     &BTRFS_I(inode)->runtime_flags))
-				btrfs_delalloc_release_space(inode, count);
+			if (dio_data.reserve)
+				btrfs_delalloc_release_space(inode,
+							dio_data.reserve);
 		} else if (ret >= 0 && (size_t)ret < count)
 			btrfs_delalloc_release_space(inode,
 						     count - (size_t)ret);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 9a11db0..a39f5d1 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3267,13 +3267,13 @@
 			scrub_blocked_if_needed(fs_info);
 		}
 
-		/* for raid56, we skip parity stripe */
 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 			ret = get_raid56_logic_offset(physical, num, map,
 						      &logical,
 						      &stripe_logical);
 			logical += base;
 			if (ret) {
+				/* it is a parity stripe */
 				stripe_logical += base;
 				stripe_end = stripe_logical + increment;
 				ret = scrub_raid56_parity(sctx, map, scrub_dev,
@@ -3480,7 +3480,6 @@
 
 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
 					  struct btrfs_device *scrub_dev,
-					  u64 chunk_tree, u64 chunk_objectid,
 					  u64 chunk_offset, u64 length,
 					  u64 dev_offset, int is_dev_replace)
 {
@@ -3531,8 +3530,6 @@
 	struct btrfs_root *root = sctx->dev_root;
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 length;
-	u64 chunk_tree;
-	u64 chunk_objectid;
 	u64 chunk_offset;
 	int ret = 0;
 	int slot;
@@ -3596,8 +3593,6 @@
 		if (found_key.offset + length <= start)
 			goto skip;
 
-		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
-		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
 
 		/*
@@ -3630,9 +3625,8 @@
 		dev_replace->cursor_right = found_key.offset + length;
 		dev_replace->cursor_left = found_key.offset;
 		dev_replace->item_needs_writeback = 1;
-		ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
-				  chunk_offset, length, found_key.offset,
-				  is_dev_replace);
+		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
+				  found_key.offset, is_dev_replace);
 
 		/*
 		 * flush, submit all pending read and write bios, afterwards
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 2b07b35..11d1eab 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1658,9 +1658,7 @@
 		 * groups on disk until we're mounted read-write again
 		 * unless we clean them up here.
 		 */
-		mutex_lock(&root->fs_info->cleaner_mutex);
 		btrfs_delete_unused_bgs(fs_info);
-		mutex_unlock(&root->fs_info->cleaner_mutex);
 
 		btrfs_dev_replace_suspend_for_unmount(fs_info);
 		btrfs_scrub_cancel(fs_info);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8f259b3..74bc333 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -117,6 +117,18 @@
 			btrfs_unpin_free_ino(root);
 		clear_btree_io_tree(&root->dirty_log_pages);
 	}
+
+	/* We can free old roots now. */
+	spin_lock(&trans->dropped_roots_lock);
+	while (!list_empty(&trans->dropped_roots)) {
+		root = list_first_entry(&trans->dropped_roots,
+					struct btrfs_root, root_list);
+		list_del_init(&root->root_list);
+		spin_unlock(&trans->dropped_roots_lock);
+		btrfs_drop_and_free_fs_root(fs_info, root);
+		spin_lock(&trans->dropped_roots_lock);
+	}
+	spin_unlock(&trans->dropped_roots_lock);
 	up_write(&fs_info->commit_root_sem);
 }
 
@@ -255,11 +267,13 @@
 	INIT_LIST_HEAD(&cur_trans->pending_ordered);
 	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
 	INIT_LIST_HEAD(&cur_trans->io_bgs);
+	INIT_LIST_HEAD(&cur_trans->dropped_roots);
 	mutex_init(&cur_trans->cache_write_mutex);
 	cur_trans->num_dirty_bgs = 0;
 	spin_lock_init(&cur_trans->dirty_bgs_lock);
 	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
 	spin_lock_init(&cur_trans->deleted_bgs_lock);
+	spin_lock_init(&cur_trans->dropped_roots_lock);
 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
 	extent_io_tree_init(&cur_trans->dirty_pages,
 			     fs_info->btree_inode->i_mapping);
@@ -336,6 +350,24 @@
 }
 
 
+void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root)
+{
+	struct btrfs_transaction *cur_trans = trans->transaction;
+
+	/* Add ourselves to the transaction dropped list */
+	spin_lock(&cur_trans->dropped_roots_lock);
+	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
+	spin_unlock(&cur_trans->dropped_roots_lock);
+
+	/* Make sure we don't try to update the root at commit time */
+	spin_lock(&root->fs_info->fs_roots_radix_lock);
+	radix_tree_tag_clear(&root->fs_info->fs_roots_radix,
+			     (unsigned long)root->root_key.objectid,
+			     BTRFS_ROOT_TRANS_TAG);
+	spin_unlock(&root->fs_info->fs_roots_radix_lock);
+}
+
 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root)
 {
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index edc2fbc..87964bf 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -65,6 +65,7 @@
 	struct list_head switch_commits;
 	struct list_head dirty_bgs;
 	struct list_head io_bgs;
+	struct list_head dropped_roots;
 	u64 num_dirty_bgs;
 
 	/*
@@ -76,6 +77,7 @@
 	spinlock_t dirty_bgs_lock;
 	struct list_head deleted_bgs;
 	spinlock_t deleted_bgs_lock;
+	spinlock_t dropped_roots_lock;
 	struct btrfs_delayed_ref_root delayed_refs;
 	int aborted;
 	int dirty_bg_run;
@@ -216,5 +218,6 @@
 int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
 void btrfs_put_transaction(struct btrfs_transaction *transaction);
 void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
-
+void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root);
 #endif
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index a4b9c8b..f31db43 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -115,8 +115,7 @@
 		ret = -EAGAIN;
 	}
 out:
-	if (path)
-		btrfs_free_path(path);
+	btrfs_free_path(path);
 	if (ret == -EAGAIN) {
 		if (root->defrag_max.objectid > root->defrag_progress.objectid)
 			goto done;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 76201d6..6fc73586 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3585,23 +3585,10 @@
 	} while (read_seqretry(&fs_info->profiles_lock, seq));
 
 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-		int num_tolerated_disk_barrier_failures;
-		u64 target = bctl->sys.target;
-
-		num_tolerated_disk_barrier_failures =
-			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
-		if (num_tolerated_disk_barrier_failures > 0 &&
-		    (target &
-		     (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
-		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
-			num_tolerated_disk_barrier_failures = 0;
-		else if (num_tolerated_disk_barrier_failures > 1 &&
-			 (target &
-			  (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
-			num_tolerated_disk_barrier_failures = 1;
-
-		fs_info->num_tolerated_disk_barrier_failures =
-			num_tolerated_disk_barrier_failures;
+		fs_info->num_tolerated_disk_barrier_failures = min(
+			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
+			btrfs_get_num_tolerated_disk_barrier_failures(
+				bctl->sys.target));
 	}
 
 	ret = insert_balance_item(fs_info->tree_root, bctl);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index a268abf..9d23e78 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -276,7 +276,7 @@
 	for (i = 0; i < num_pages; i++) {
 		struct page *page = osd_data->pages[i];
 
-		if (rc < 0)
+		if (rc < 0 && rc != -ENOENT)
 			goto unlock;
 		if (bytes < (int)PAGE_CACHE_SIZE) {
 			/* zero (remainder of) page */
@@ -717,8 +717,10 @@
 	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
 	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
 
-	if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
+	if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
 		pr_warn("writepage_start %p on forced umount\n", inode);
+		truncate_pagecache(inode, 0);
+		mapping_set_error(mapping, -EIO);
 		return -EIO; /* we're in a forced umount, don't write! */
 	}
 	if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index ddd5e94..27b5668 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2413,6 +2413,14 @@
 			goto out_unlock;
 		}
 
+		if (!__ceph_is_any_caps(ci) &&
+		    ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
+			dout("get_cap_refs %p forced umount\n", inode);
+			*err = -EIO;
+			ret = 1;
+			goto out_unlock;
+		}
+
 		dout("get_cap_refs %p have %s needed %s\n", inode,
 		     ceph_cap_string(have), ceph_cap_string(need));
 	}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 8b79d87..0c62868 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -136,7 +136,6 @@
 	struct ceph_mds_client *mdsc = fsc->mdsc;
 	struct ceph_mds_request *req;
 	struct ceph_file_info *cf = file->private_data;
-	struct inode *parent_inode = NULL;
 	int err;
 	int flags, fmode, wanted;
 
@@ -210,10 +209,7 @@
 	ihold(inode);
 
 	req->r_num_caps = 1;
-	if (flags & O_CREAT)
-		parent_inode = ceph_get_dentry_parent_inode(file->f_path.dentry);
-	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
-	iput(parent_inode);
+	err = ceph_mdsc_do_request(mdsc, NULL, req);
 	if (!err)
 		err = ceph_init_file(inode, file, req->r_fmode);
 	ceph_mdsc_put_request(req);
@@ -279,7 +275,7 @@
 	if (err)
 		goto out_req;
 
-	if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
+	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
 		err = ceph_handle_notrace_create(dir, dentry);
 
 	if (d_unhashed(dentry)) {
@@ -956,6 +952,12 @@
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = inode_to_bdi(inode);
 
+	if (iocb->ki_flags & IOCB_APPEND) {
+		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
+		if (err < 0)
+			goto out;
+	}
+
 	err = generic_write_checks(iocb, from);
 	if (err <= 0)
 		goto out;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 6aa07af..51cb02d 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2107,7 +2107,6 @@
 	msg = create_request_message(mdsc, req, mds, drop_cap_releases);
 	if (IS_ERR(msg)) {
 		req->r_err = PTR_ERR(msg);
-		complete_request(mdsc, req);
 		return PTR_ERR(msg);
 	}
 	req->r_request = msg;
@@ -2135,7 +2134,7 @@
 {
 	struct ceph_mds_session *session = NULL;
 	int mds = -1;
-	int err = -EAGAIN;
+	int err = 0;
 
 	if (req->r_err || req->r_got_result) {
 		if (req->r_aborted)
@@ -2149,6 +2148,11 @@
 		err = -EIO;
 		goto finish;
 	}
+	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
+		dout("do_request forced umount\n");
+		err = -EIO;
+		goto finish;
+	}
 
 	put_request_session(req);
 
@@ -2196,13 +2200,15 @@
 
 out_session:
 	ceph_put_mds_session(session);
+finish:
+	if (err) {
+		dout("__do_request early error %d\n", err);
+		req->r_err = err;
+		complete_request(mdsc, req);
+		__unregister_request(mdsc, req);
+	}
 out:
 	return err;
-
-finish:
-	req->r_err = err;
-	complete_request(mdsc, req);
-	goto out;
 }
 
 /*
@@ -2289,8 +2295,6 @@
 
 	if (req->r_err) {
 		err = req->r_err;
-		__unregister_request(mdsc, req);
-		dout("do_request early error %d\n", err);
 		goto out;
 	}
 
@@ -2411,7 +2415,7 @@
 		mutex_unlock(&mdsc->mutex);
 		goto out;
 	}
-	if (req->r_got_safe && !head->safe) {
+	if (req->r_got_safe) {
 		pr_warn("got unsafe after safe on %llu from mds%d\n",
 			   tid, mds);
 		mutex_unlock(&mdsc->mutex);
@@ -2520,8 +2524,7 @@
 		if (err) {
 			req->r_err = err;
 		} else {
-			req->r_reply = msg;
-			ceph_msg_get(msg);
+			req->r_reply = ceph_msg_get(msg);
 			req->r_got_result = true;
 		}
 	} else {
@@ -3555,7 +3558,7 @@
 {
 	u64 want_tid, want_flush, want_snap;
 
-	if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
+	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
 		return;
 
 	dout("sync\n");
@@ -3584,7 +3587,7 @@
  */
 static bool done_closing_sessions(struct ceph_mds_client *mdsc)
 {
-	if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
+	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
 		return true;
 	return atomic_read(&mdsc->num_sessions) == 0;
 }
@@ -3643,6 +3646,34 @@
 	dout("stopped\n");
 }
 
+void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
+{
+	struct ceph_mds_session *session;
+	int mds;
+
+	dout("force umount\n");
+
+	mutex_lock(&mdsc->mutex);
+	for (mds = 0; mds < mdsc->max_sessions; mds++) {
+		session = __ceph_lookup_mds_session(mdsc, mds);
+		if (!session)
+			continue;
+		mutex_unlock(&mdsc->mutex);
+		mutex_lock(&session->s_mutex);
+		__close_session(mdsc, session);
+		if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
+			cleanup_session_requests(mdsc, session);
+			remove_session_caps(session);
+		}
+		mutex_unlock(&session->s_mutex);
+		ceph_put_mds_session(session);
+		mutex_lock(&mdsc->mutex);
+		kick_requests(mdsc, mds);
+	}
+	__wake_requests(mdsc, &mdsc->waiting_for_map);
+	mutex_unlock(&mdsc->mutex);
+}
+
 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
 {
 	dout("stop\n");
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 762757e..f575eaf 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -366,6 +366,7 @@
 
 extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
 extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
+extern void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc);
 extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);
 
 extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 233d906a..4aa7122 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -338,12 +338,6 @@
 		return 0;
 	}
 
-	if (num == 0 && realm->seq == ceph_empty_snapc->seq) {
-		ceph_get_snap_context(ceph_empty_snapc);
-		snapc = ceph_empty_snapc;
-		goto done;
-	}
-
 	/* alloc new snap context */
 	err = -ENOMEM;
 	if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
@@ -381,7 +375,6 @@
 	     realm->ino, realm, snapc, snapc->seq,
 	     (unsigned int) snapc->num_snaps);
 
-done:
 	ceph_put_snap_context(realm->cached_context);
 	realm->cached_context = snapc;
 	return 0;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 7b6bfcb..f446afa 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -708,6 +708,7 @@
 	if (!fsc)
 		return;
 	fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
+	ceph_mdsc_force_umount(fsc->mdsc);
 	return;
 }
 
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index aa0dc25..afa09fc 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -444,6 +444,48 @@
 	return 0;
 }
 
+/* Server has provided av pairs/target info in the type 2 challenge
+ * packet and we have plucked it and stored it within the smb session.
+ * We parse that blob here to find the server-given timestamp
+ * as part of ntlmv2 authentication (or fall back to the local current
+ * time in case of failure).
+ */
+static __le64
+find_timestamp(struct cifs_ses *ses)
+{
+	unsigned int attrsize;
+	unsigned int type;
+	unsigned int onesize = sizeof(struct ntlmssp2_name);
+	unsigned char *blobptr;
+	unsigned char *blobend;
+	struct ntlmssp2_name *attrptr;
+
+	if (!ses->auth_key.len || !ses->auth_key.response)
+		return 0;
+
+	blobptr = ses->auth_key.response;
+	blobend = blobptr + ses->auth_key.len;
+
+	while (blobptr + onesize < blobend) {
+		attrptr = (struct ntlmssp2_name *) blobptr;
+		type = le16_to_cpu(attrptr->type);
+		if (type == NTLMSSP_AV_EOL)
+			break;
+		blobptr += 2; /* advance attr type */
+		attrsize = le16_to_cpu(attrptr->length);
+		blobptr += 2; /* advance attr size */
+		if (blobptr + attrsize > blobend)
+			break;
+		if (type == NTLMSSP_AV_TIMESTAMP) {
+			if (attrsize == sizeof(u64))
+				return *((__le64 *)blobptr);
+		}
+		blobptr += attrsize; /* advance attr value */
+	}
+
+	return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+}
+
 static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
 			    const struct nls_table *nls_cp)
 {
@@ -641,6 +683,7 @@
 	struct ntlmv2_resp *ntlmv2;
 	char ntlmv2_hash[16];
 	unsigned char *tiblob = NULL; /* target info blob */
+	__le64 rsp_timestamp;
 
 	if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
 		if (!ses->domainName) {
@@ -659,6 +702,12 @@
 		}
 	}
 
+	/* Must be within 5 minutes of the server (or in range +/-2h
+	 * in case of Mac OS X), so simply carry over server timestamp
+	 * (as Windows 7 does)
+	 */
+	rsp_timestamp = find_timestamp(ses);
+
 	baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
 	tilen = ses->auth_key.len;
 	tiblob = ses->auth_key.response;
@@ -675,8 +724,8 @@
 			(ses->auth_key.response + CIFS_SESS_KEY_SIZE);
 	ntlmv2->blob_signature = cpu_to_le32(0x00000101);
 	ntlmv2->reserved = 0;
-	/* Must be within 5 minutes of the server */
-	ntlmv2->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+	ntlmv2->time = rsp_timestamp;
+
 	get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal));
 	ntlmv2->reserved2 = 0;
 
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 6a1119e..e739950 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -325,8 +325,11 @@
 static void
 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
 {
-	if (ses->sectype == Unspecified)
+	if (ses->sectype == Unspecified) {
+		if (ses->user_name == NULL)
+			seq_puts(s, ",sec=none");
 		return;
+	}
 
 	seq_puts(s, ",sec=");
 
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index c63f522..28a77bf 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -67,6 +67,12 @@
 		goto out_drop_write;
 	}
 
+	if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) {
+		rc = -EBADF;
+		cifs_dbg(VFS, "src file seems to be from a different filesystem type\n");
+		goto out_fput;
+	}
+
 	if ((!src_file.file->private_data) || (!dst_file->private_data)) {
 		rc = -EBADF;
 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index df91bcf..18da19f 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -50,9 +50,13 @@
 		break;
 	default:
 		server->echoes = true;
-		server->oplocks = true;
+		if (enable_oplocks) {
+			server->oplocks = true;
+			server->oplock_credits = 1;
+		} else
+			server->oplocks = false;
+
 		server->echo_credits = 1;
-		server->oplock_credits = 1;
 	}
 	server->credits -= server->echo_credits + server->oplock_credits;
 	return 0;
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 070fb2a..ce83e2e 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -46,6 +46,7 @@
 #include "smb2status.h"
 #include "smb2glob.h"
 #include "cifspdu.h"
+#include "cifs_spnego.h"
 
 /*
  *  The following table defines the expected "StructureSize" of SMB2 requests
@@ -486,19 +487,15 @@
 		cifs_dbg(FYI, "missing security blob on negprot\n");
 
 	rc = cifs_enable_signing(server, ses->sign);
-#ifdef CONFIG_SMB2_ASN1  /* BB REMOVEME when updated asn1.c ready */
 	if (rc)
 		goto neg_exit;
-	if (blob_length)
+	if (blob_length) {
 		rc = decode_negTokenInit(security_blob, blob_length, server);
-	if (rc == 1)
-		rc = 0;
-	else if (rc == 0) {
-		rc = -EIO;
-		goto neg_exit;
+		if (rc == 1)
+			rc = 0;
+		else if (rc == 0)
+			rc = -EIO;
 	}
-#endif
-
 neg_exit:
 	free_rsp_buf(resp_buftype, rsp);
 	return rc;
@@ -592,7 +589,8 @@
 	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
 	struct TCP_Server_Info *server = ses->server;
 	u16 blob_length = 0;
-	char *security_blob;
+	struct key *spnego_key = NULL;
+	char *security_blob = NULL;
 	char *ntlmssp_blob = NULL;
 	bool use_spnego = false; /* else use raw ntlmssp */
 
@@ -620,7 +618,8 @@
 	ses->ntlmssp->sesskey_per_smbsess = true;
 
 	/* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
-	ses->sectype = RawNTLMSSP;
+	if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
+		ses->sectype = RawNTLMSSP;
 
 ssetup_ntlmssp_authenticate:
 	if (phase == NtLmChallenge)
@@ -649,7 +648,48 @@
 	iov[0].iov_base = (char *)req;
 	/* 4 for rfc1002 length field and 1 for pad */
 	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
-	if (phase == NtLmNegotiate) {
+
+	if (ses->sectype == Kerberos) {
+#ifdef CONFIG_CIFS_UPCALL
+		struct cifs_spnego_msg *msg;
+
+		spnego_key = cifs_get_spnego_key(ses);
+		if (IS_ERR(spnego_key)) {
+			rc = PTR_ERR(spnego_key);
+			spnego_key = NULL;
+			goto ssetup_exit;
+		}
+
+		msg = spnego_key->payload.data;
+		/*
+		 * check version field to make sure that cifs.upcall is
+		 * sending us a response in an expected form
+		 */
+		if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
+			cifs_dbg(VFS,
+				  "bad cifs.upcall version. Expected %d got %d",
+				  CIFS_SPNEGO_UPCALL_VERSION, msg->version);
+			rc = -EKEYREJECTED;
+			goto ssetup_exit;
+		}
+		ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+						 GFP_KERNEL);
+		if (!ses->auth_key.response) {
+			cifs_dbg(VFS,
+				"Kerberos can't allocate (%u bytes) memory",
+				msg->sesskey_len);
+			rc = -ENOMEM;
+			goto ssetup_exit;
+		}
+		ses->auth_key.len = msg->sesskey_len;
+		blob_length = msg->secblob_len;
+		iov[1].iov_base = msg->data + msg->sesskey_len;
+		iov[1].iov_len = blob_length;
+#else
+		rc = -EOPNOTSUPP;
+		goto ssetup_exit;
+#endif /* CONFIG_CIFS_UPCALL */
+	} else if (phase == NtLmNegotiate) { /* if not krb5 must be ntlmssp */
 		ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
 				       GFP_KERNEL);
 		if (ntlmssp_blob == NULL) {
@@ -672,6 +712,8 @@
 			/* with raw NTLMSSP we don't encapsulate in SPNEGO */
 			security_blob = ntlmssp_blob;
 		}
+		iov[1].iov_base = security_blob;
+		iov[1].iov_len = blob_length;
 	} else if (phase == NtLmAuthenticate) {
 		req->hdr.SessionId = ses->Suid;
 		ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
@@ -699,6 +741,8 @@
 		} else {
 			security_blob = ntlmssp_blob;
 		}
+		iov[1].iov_base = security_blob;
+		iov[1].iov_len = blob_length;
 	} else {
 		cifs_dbg(VFS, "illegal ntlmssp phase\n");
 		rc = -EIO;
@@ -710,8 +754,6 @@
 				cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
 					    1 /* pad */ - 4 /* rfc1001 len */);
 	req->SecurityBufferLength = cpu_to_le16(blob_length);
-	iov[1].iov_base = security_blob;
-	iov[1].iov_len = blob_length;
 
 	inc_rfc1001_len(req, blob_length - 1 /* pad */);
 
@@ -722,6 +764,7 @@
 
 	kfree(security_blob);
 	rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
+	ses->Suid = rsp->hdr.SessionId;
 	if (resp_buftype != CIFS_NO_BUFFER &&
 	    rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
 		if (phase != NtLmNegotiate) {
@@ -739,7 +782,6 @@
 		/* NTLMSSP Negotiate sent now processing challenge (response) */
 		phase = NtLmChallenge; /* process ntlmssp challenge */
 		rc = 0; /* MORE_PROCESSING is not an error here but expected */
-		ses->Suid = rsp->hdr.SessionId;
 		rc = decode_ntlmssp_challenge(rsp->Buffer,
 				le16_to_cpu(rsp->SecurityBufferLength), ses);
 	}
@@ -796,6 +838,10 @@
 		kfree(ses->auth_key.response);
 		ses->auth_key.response = NULL;
 	}
+	if (spnego_key) {
+		key_invalidate(spnego_key);
+		key_put(spnego_key);
+	}
 	kfree(ses->ntlmssp);
 
 	return rc;
@@ -876,6 +922,12 @@
 	if (tcon && tcon->bad_network_name)
 		return -ENOENT;
 
+	if ((tcon->seal) &&
+	    ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
+		cifs_dbg(VFS, "encryption requested but no server support");
+		return -EOPNOTSUPP;
+	}
+
 	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
 	if (unc_path == NULL)
 		return -ENOMEM;
@@ -955,6 +1007,8 @@
 	    ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
 		cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
 	init_copy_chunk_defaults(tcon);
+	if (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA)
+		cifs_dbg(VFS, "Encrypted shares not supported");
 	if (tcon->ses->server->ops->validate_negotiate)
 		rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
 tcon_exit:
diff --git a/fs/dax.c b/fs/dax.c
index 93bf2f9..bcfb14b 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -119,7 +119,8 @@
 		size_t len;
 		if (pos == max) {
 			unsigned blkbits = inode->i_blkbits;
-			sector_t block = pos >> blkbits;
+			long page = pos >> PAGE_SHIFT;
+			sector_t block = page << (PAGE_SHIFT - blkbits);
 			unsigned first = pos - (block << blkbits);
 			long size;
 
@@ -568,8 +569,20 @@
 	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
 		goto fallback;
 
+	sector = bh.b_blocknr << (blkbits - 9);
+
 	if (buffer_unwritten(&bh) || buffer_new(&bh)) {
 		int i;
+
+		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
+						bh.b_size);
+		if (length < 0) {
+			result = VM_FAULT_SIGBUS;
+			goto out;
+		}
+		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
+			goto fallback;
+
 		for (i = 0; i < PTRS_PER_PMD; i++)
 			clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
 		wmb_pmem();
@@ -622,7 +635,6 @@
 		result = VM_FAULT_NOPAGE;
 		spin_unlock(ptl);
 	} else {
-		sector = bh.b_blocknr << (blkbits - 9);
 		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
 						bh.b_size);
 		if (length < 0) {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 2448912..091a364 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1380,6 +1380,10 @@
  * Write a portion of b_io inodes which belong to @sb.
  *
  * Return the number of pages and/or inodes written.
+ *
+ * NOTE! This is called with wb->list_lock held, and will
+ * unlock and relock that for each inode it ends up doing
+ * IO for.
  */
 static long writeback_sb_inodes(struct super_block *sb,
 				struct bdi_writeback *wb,
@@ -1398,9 +1402,7 @@
 	unsigned long start_time = jiffies;
 	long write_chunk;
 	long wrote = 0;  /* count both pages and inodes */
-	struct blk_plug plug;
 
-	blk_start_plug(&plug);
 	while (!list_empty(&wb->b_io)) {
 		struct inode *inode = wb_inode(wb->b_io.prev);
 
@@ -1479,6 +1481,21 @@
 		wbc_detach_inode(&wbc);
 		work->nr_pages -= write_chunk - wbc.nr_to_write;
 		wrote += write_chunk - wbc.nr_to_write;
+
+		if (need_resched()) {
+			/*
+			 * We're trying to balance between building up a nice
+			 * long list of IOs to improve our merge rate, and
+			 * getting those IOs out quickly for anyone throttling
+			 * in balance_dirty_pages().  cond_resched() doesn't
+			 * unplug, so get our IOs out the door before we
+			 * give up the CPU.
+			 */
+			blk_flush_plug(current);
+			cond_resched();
+		}
+
+
 		spin_lock(&wb->list_lock);
 		spin_lock(&inode->i_lock);
 		if (!(inode->i_state & I_DIRTY_ALL))
@@ -1486,7 +1503,7 @@
 		requeue_inode(inode, wb, &wbc);
 		inode_sync_complete(inode);
 		spin_unlock(&inode->i_lock);
-		cond_resched_lock(&wb->list_lock);
+
 		/*
 		 * bail out to wb_writeback() often enough to check
 		 * background threshold and other termination conditions.
@@ -1498,7 +1515,6 @@
 				break;
 		}
 	}
-	blk_finish_plug(&plug);
 	return wrote;
 }
 
@@ -1545,12 +1561,15 @@
 		.range_cyclic	= 1,
 		.reason		= reason,
 	};
+	struct blk_plug plug;
 
+	blk_start_plug(&plug);
 	spin_lock(&wb->list_lock);
 	if (list_empty(&wb->b_io))
 		queue_io(wb, &work);
 	__writeback_inodes_wb(wb, &work);
 	spin_unlock(&wb->list_lock);
+	blk_finish_plug(&plug);
 
 	return nr_pages - work.nr_pages;
 }
@@ -1578,10 +1597,12 @@
 	unsigned long oldest_jif;
 	struct inode *inode;
 	long progress;
+	struct blk_plug plug;
 
 	oldest_jif = jiffies;
 	work->older_than_this = &oldest_jif;
 
+	blk_start_plug(&plug);
 	spin_lock(&wb->list_lock);
 	for (;;) {
 		/*
@@ -1661,6 +1682,7 @@
 		}
 	}
 	spin_unlock(&wb->list_lock);
+	blk_finish_plug(&plug);
 
 	return nr_pages - work->nr_pages;
 }
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a38e38f..9bd1244 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -34,6 +34,7 @@
 #include <linux/percpu.h>
 #include <linux/list_sort.h>
 #include <linux/lockref.h>
+#include <linux/rhashtable.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -50,9 +51,8 @@
 #include "trace_gfs2.h"
 
 struct gfs2_glock_iter {
-	int hash;			/* hash bucket index           */
-	unsigned nhash;			/* Index within current bucket */
 	struct gfs2_sbd *sdp;		/* incore superblock           */
+	struct rhashtable_iter hti;	/* rhashtable iterator         */
 	struct gfs2_glock *gl;		/* current glock struct        */
 	loff_t last_pos;		/* last position               */
 };
@@ -70,44 +70,19 @@
 
 #define GFS2_GL_HASH_SHIFT      15
 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
-#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
 
-static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
-static struct dentry *gfs2_root;
+static struct rhashtable_params ht_parms = {
+	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
+	.key_len = sizeof(struct lm_lockname),
+	.key_offset = offsetof(struct gfs2_glock, gl_name),
+	.head_offset = offsetof(struct gfs2_glock, gl_node),
+};
 
-/**
- * gl_hash() - Turn glock number into hash bucket number
- * @lock: The glock number
- *
- * Returns: The number of the corresponding hash bucket
- */
+static struct rhashtable gl_hash_table;
 
-static unsigned int gl_hash(const struct gfs2_sbd *sdp,
-			    const struct lm_lockname *name)
+void gfs2_glock_free(struct gfs2_glock *gl)
 {
-	unsigned int h;
-
-	h = jhash(&name->ln_number, sizeof(u64), 0);
-	h = jhash(&name->ln_type, sizeof(unsigned int), h);
-	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
-	h &= GFS2_GL_HASH_MASK;
-
-	return h;
-}
-
-static inline void spin_lock_bucket(unsigned int hash)
-{
-	hlist_bl_lock(&gl_hash_table[hash]);
-}
-
-static inline void spin_unlock_bucket(unsigned int hash)
-{
-	hlist_bl_unlock(&gl_hash_table[hash]);
-}
-
-static void gfs2_glock_dealloc(struct rcu_head *rcu)
-{
-	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
 	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
 		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
@@ -115,13 +90,6 @@
 		kfree(gl->gl_lksb.sb_lvbptr);
 		kmem_cache_free(gfs2_glock_cachep, gl);
 	}
-}
-
-void gfs2_glock_free(struct gfs2_glock *gl)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-
-	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
 		wake_up(&sdp->sd_glock_wait);
 }
@@ -192,7 +160,7 @@
 
 void gfs2_glock_put(struct gfs2_glock *gl)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct address_space *mapping = gfs2_glock2aspace(gl);
 
 	if (lockref_put_or_lock(&gl->gl_lockref))
@@ -202,9 +170,7 @@
 
 	gfs2_glock_remove_from_lru(gl);
 	spin_unlock(&gl->gl_lockref.lock);
-	spin_lock_bucket(gl->gl_hash);
-	hlist_bl_del_rcu(&gl->gl_list);
-	spin_unlock_bucket(gl->gl_hash);
+	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
 	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
 	trace_gfs2_glock_put(gl);
@@ -212,33 +178,6 @@
 }
 
 /**
- * search_bucket() - Find struct gfs2_glock by lock number
- * @bucket: the bucket to search
- * @name: The lock name
- *
- * Returns: NULL, or the struct gfs2_glock with the requested number
- */
-
-static struct gfs2_glock *search_bucket(unsigned int hash,
-					const struct gfs2_sbd *sdp,
-					const struct lm_lockname *name)
-{
-	struct gfs2_glock *gl;
-	struct hlist_bl_node *h;
-
-	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
-		if (!lm_name_equal(&gl->gl_name, name))
-			continue;
-		if (gl->gl_sbd != sdp)
-			continue;
-		if (lockref_get_not_dead(&gl->gl_lockref))
-			return gl;
-	}
-
-	return NULL;
-}
-
-/**
  * may_grant - check if its ok to grant a new lock
  * @gl: The glock
  * @gh: The lock request which we wish to grant
@@ -506,7 +445,7 @@
 __acquires(&gl->gl_spin)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	unsigned int lck_flags = gh ? gh->gh_flags : 0;
 	int ret;
 
@@ -628,7 +567,7 @@
 static void delete_work_func(struct work_struct *work)
 {
 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_inode *ip;
 	struct inode *inode;
 	u64 no_addr = gl->gl_name.ln_number;
@@ -704,15 +643,17 @@
 		   struct gfs2_glock **glp)
 {
 	struct super_block *s = sdp->sd_vfs;
-	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
-	struct gfs2_glock *gl, *tmp;
-	unsigned int hash = gl_hash(sdp, &name);
+	struct lm_lockname name = { .ln_number = number,
+				    .ln_type = glops->go_type,
+				    .ln_sbd = sdp };
+	struct gfs2_glock *gl, *tmp = NULL;
 	struct address_space *mapping;
 	struct kmem_cache *cachep;
+	int ret, tries = 0;
 
-	rcu_read_lock();
-	gl = search_bucket(hash, sdp, &name);
-	rcu_read_unlock();
+	gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
+	if (gl && !lockref_get_not_dead(&gl->gl_lockref))
+		gl = NULL;
 
 	*glp = gl;
 	if (gl)
@@ -739,14 +680,13 @@
 	}
 
 	atomic_inc(&sdp->sd_glock_disposal);
-	gl->gl_sbd = sdp;
+	gl->gl_node.next = NULL;
 	gl->gl_flags = 0;
 	gl->gl_name = name;
 	gl->gl_lockref.count = 1;
 	gl->gl_state = LM_ST_UNLOCKED;
 	gl->gl_target = LM_ST_UNLOCKED;
 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
-	gl->gl_hash = hash;
 	gl->gl_ops = glops;
 	gl->gl_dstamp = ktime_set(0, 0);
 	preempt_disable();
@@ -771,22 +711,34 @@
 		mapping->writeback_index = 0;
 	}
 
-	spin_lock_bucket(hash);
-	tmp = search_bucket(hash, sdp, &name);
-	if (tmp) {
-		spin_unlock_bucket(hash);
-		kfree(gl->gl_lksb.sb_lvbptr);
-		kmem_cache_free(cachep, gl);
-		atomic_dec(&sdp->sd_glock_disposal);
-		gl = tmp;
-	} else {
-		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
-		spin_unlock_bucket(hash);
+again:
+	ret = rhashtable_lookup_insert_fast(&gl_hash_table, &gl->gl_node,
+					    ht_parms);
+	if (ret == 0) {
+		*glp = gl;
+		return 0;
 	}
 
-	*glp = gl;
+	if (ret == -EEXIST) {
+		ret = 0;
+		tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
+		if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
+			if (++tries < 100) {
+				cond_resched();
+				goto again;
+			}
+			tmp = NULL;
+			ret = -ENOMEM;
+		}
+	} else {
+		WARN_ON_ONCE(ret);
+	}
+	kfree(gl->gl_lksb.sb_lvbptr);
+	kmem_cache_free(cachep, gl);
+	atomic_dec(&sdp->sd_glock_disposal);
+	*glp = tmp;
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -928,7 +880,7 @@
 __acquires(&gl->gl_spin)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct list_head *insert_pt = NULL;
 	struct gfs2_holder *gh2;
 	int try_futile = 0;
@@ -1006,7 +958,7 @@
 int gfs2_glock_nq(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	int error = 0;
 
 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
@@ -1313,7 +1265,7 @@
 
 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
-	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
 
 	spin_lock(&gl->gl_spin);
 	gl->gl_reply = ret;
@@ -1462,31 +1414,26 @@
  *
  */
 
-static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
-			  unsigned int hash)
+static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 {
 	struct gfs2_glock *gl;
-	struct hlist_bl_head *head = &gl_hash_table[hash];
-	struct hlist_bl_node *pos;
+	struct rhash_head *pos, *next;
+	const struct bucket_table *tbl;
+	int i;
 
 	rcu_read_lock();
-	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
-		if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
-			examiner(gl);
+	tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
+	for (i = 0; i < tbl->size; i++) {
+		rht_for_each_entry_safe(gl, pos, next, tbl, i, gl_node) {
+			if ((gl->gl_name.ln_sbd == sdp) &&
+			    lockref_get_not_dead(&gl->gl_lockref))
+				examiner(gl);
+		}
 	}
 	rcu_read_unlock();
 	cond_resched();
 }
 
-static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
-{
-	unsigned x;
-
-	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
-		examine_bucket(examiner, sdp, x);
-}
-
-
 /**
  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
  * @gl: The glock to thaw
@@ -1569,7 +1516,7 @@
 	int ret;
 
 	ret = gfs2_truncatei_resume(ip);
-	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
+	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
 
 	spin_lock(&gl->gl_spin);
 	clear_bit(GLF_LOCK, &gl->gl_flags);
@@ -1733,17 +1680,17 @@
 {
 	struct gfs2_glock *gl = iter_ptr;
 
-	seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
+	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
 		   gl->gl_name.ln_type,
 		   (unsigned long long)gl->gl_name.ln_number,
-		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
-		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
-		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
-		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
-		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
-		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
-		   (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
-		   (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
+		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
+		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
+		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
+		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
+		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
+		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
+		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
+		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
 	return 0;
 }
 
@@ -1776,11 +1723,10 @@
 
 static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
 {
-	struct gfs2_glock_iter *gi = seq->private;
-	struct gfs2_sbd *sdp = gi->sdp;
-	unsigned index = gi->hash >> 3;
-	unsigned subindex = gi->hash & 0x07;
-	s64 value;
+	struct gfs2_sbd *sdp = seq->private;
+	loff_t pos = *(loff_t *)iter_ptr;
+	unsigned index = pos >> 3;
+	unsigned subindex = pos & 0x07;
 	int i;
 
 	if (index == 0 && subindex != 0)
@@ -1791,12 +1737,12 @@
 
 	for_each_possible_cpu(i) {
                 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
-		if (index == 0) {
-			value = i;
-		} else {
-			value = lkstats->lkstats[index - 1].stats[subindex];
-		}
-		seq_printf(seq, " %15lld", (long long)value);
+
+		if (index == 0)
+			seq_printf(seq, " %15u", i);
+		else
+			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
+				   lkstats[index - 1].stats[subindex]);
 	}
 	seq_putc(seq, '\n');
 	return 0;
@@ -1804,20 +1750,24 @@
 
 int __init gfs2_glock_init(void)
 {
-	unsigned i;
-	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
-		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
-	}
+	int ret;
+
+	ret = rhashtable_init(&gl_hash_table, &ht_parms);
+	if (ret < 0)
+		return ret;
 
 	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
 					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
-	if (!glock_workqueue)
+	if (!glock_workqueue) {
+		rhashtable_destroy(&gl_hash_table);
 		return -ENOMEM;
+	}
 	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
 						WQ_MEM_RECLAIM | WQ_FREEZABLE,
 						0);
 	if (!gfs2_delete_workqueue) {
 		destroy_workqueue(glock_workqueue);
+		rhashtable_destroy(&gl_hash_table);
 		return -ENOMEM;
 	}
 
@@ -1829,72 +1779,41 @@
 void gfs2_glock_exit(void)
 {
 	unregister_shrinker(&glock_shrinker);
+	rhashtable_destroy(&gl_hash_table);
 	destroy_workqueue(glock_workqueue);
 	destroy_workqueue(gfs2_delete_workqueue);
 }
 
-static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
+static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
 {
-	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
-			      struct gfs2_glock, gl_list);
-}
-
-static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
-{
-	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
-			      struct gfs2_glock, gl_list);
-}
-
-static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
-{
-	struct gfs2_glock *gl;
-
 	do {
-		gl = gi->gl;
-		if (gl) {
-			gi->gl = glock_hash_next(gl);
-			gi->nhash++;
-		} else {
-			if (gi->hash >= GFS2_GL_HASH_SIZE) {
-				rcu_read_unlock();
-				return 1;
-			}
-			gi->gl = glock_hash_chain(gi->hash);
-			gi->nhash = 0;
-		}
-		while (gi->gl == NULL) {
-			gi->hash++;
-			if (gi->hash >= GFS2_GL_HASH_SIZE) {
-				rcu_read_unlock();
-				return 1;
-			}
-			gi->gl = glock_hash_chain(gi->hash);
-			gi->nhash = 0;
+		gi->gl = rhashtable_walk_next(&gi->hti);
+		if (IS_ERR(gi->gl)) {
+			if (PTR_ERR(gi->gl) == -EAGAIN)
+				continue;
+			gi->gl = NULL;
 		}
 	/* Skip entries for other sb and dead entries */
-	} while (gi->sdp != gi->gl->gl_sbd ||
-		 __lockref_is_dead(&gi->gl->gl_lockref));
-
-	return 0;
+	} while ((gi->gl) && ((gi->sdp != gi->gl->gl_name.ln_sbd) ||
+			      __lockref_is_dead(&gi->gl->gl_lockref)));
 }
 
 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct gfs2_glock_iter *gi = seq->private;
 	loff_t n = *pos;
+	int ret;
 
 	if (gi->last_pos <= *pos)
-		n = gi->nhash + (*pos - gi->last_pos);
-	else
-		gi->hash = 0;
+		n = (*pos - gi->last_pos);
 
-	gi->nhash = 0;
-	rcu_read_lock();
+	ret = rhashtable_walk_start(&gi->hti);
+	if (ret)
+		return NULL;
 
 	do {
-		if (gfs2_glock_iter_next(gi))
-			return NULL;
-	} while (n--);
+		gfs2_glock_iter_next(gi);
+	} while (gi->gl && n--);
 
 	gi->last_pos = *pos;
 	return gi->gl;
@@ -1907,9 +1826,7 @@
 
 	(*pos)++;
 	gi->last_pos = *pos;
-	if (gfs2_glock_iter_next(gi))
-		return NULL;
-
+	gfs2_glock_iter_next(gi);
 	return gi->gl;
 }
 
@@ -1917,9 +1834,8 @@
 {
 	struct gfs2_glock_iter *gi = seq->private;
 
-	if (gi->gl)
-		rcu_read_unlock();
 	gi->gl = NULL;
+	rhashtable_walk_stop(&gi->hti);
 }
 
 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
@@ -1930,26 +1846,19 @@
 
 static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	struct gfs2_glock_iter *gi = seq->private;
-
-	gi->hash = *pos;
+	preempt_disable();
 	if (*pos >= GFS2_NR_SBSTATS)
 		return NULL;
-	preempt_disable();
-	return SEQ_START_TOKEN;
+	return pos;
 }
 
 static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
 				   loff_t *pos)
 {
-	struct gfs2_glock_iter *gi = seq->private;
 	(*pos)++;
-	gi->hash++;
-	if (gi->hash >= GFS2_NR_SBSTATS) {
-		preempt_enable();
+	if (*pos >= GFS2_NR_SBSTATS)
 		return NULL;
-	}
-	return SEQ_START_TOKEN;
+	return pos;
 }
 
 static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
@@ -1987,14 +1896,28 @@
 	if (ret == 0) {
 		struct seq_file *seq = file->private_data;
 		struct gfs2_glock_iter *gi = seq->private;
+
 		gi->sdp = inode->i_private;
+		gi->last_pos = 0;
 		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
 		if (seq->buf)
 			seq->size = GFS2_SEQ_GOODSIZE;
+		gi->gl = NULL;
+		ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
 	}
 	return ret;
 }
 
+static int gfs2_glocks_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+	struct gfs2_glock_iter *gi = seq->private;
+
+	gi->gl = NULL;
+	rhashtable_walk_exit(&gi->hti);
+	return seq_release_private(inode, file);
+}
+
 static int gfs2_glstats_open(struct inode *inode, struct file *file)
 {
 	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
@@ -2003,21 +1926,22 @@
 		struct seq_file *seq = file->private_data;
 		struct gfs2_glock_iter *gi = seq->private;
 		gi->sdp = inode->i_private;
+		gi->last_pos = 0;
 		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
 		if (seq->buf)
 			seq->size = GFS2_SEQ_GOODSIZE;
+		gi->gl = NULL;
+		ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
 	}
 	return ret;
 }
 
 static int gfs2_sbstats_open(struct inode *inode, struct file *file)
 {
-	int ret = seq_open_private(file, &gfs2_sbstats_seq_ops,
-				   sizeof(struct gfs2_glock_iter));
+	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
 	if (ret == 0) {
 		struct seq_file *seq = file->private_data;
-		struct gfs2_glock_iter *gi = seq->private;
-		gi->sdp = inode->i_private;
+		seq->private = inode->i_private;  /* sdp */
 	}
 	return ret;
 }
@@ -2027,7 +1951,7 @@
 	.open    = gfs2_glocks_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
-	.release = seq_release_private,
+	.release = gfs2_glocks_release,
 };
 
 static const struct file_operations gfs2_glstats_fops = {
@@ -2035,7 +1959,7 @@
 	.open    = gfs2_glstats_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
-	.release = seq_release_private,
+	.release = gfs2_glocks_release,
 };
 
 static const struct file_operations gfs2_sbstats_fops = {
@@ -2043,7 +1967,7 @@
 	.open	 = gfs2_sbstats_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
-	.release = seq_release_private,
+	.release = seq_release,
 };
 
 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index fa3fa5e..1f6c9c3 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -32,13 +32,15 @@
 
 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
 {
-	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
+	fs_err(gl->gl_name.ln_sbd,
+	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
+	       "state 0x%lx\n",
 	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
 	       bh->b_page->mapping, bh->b_page->flags);
-	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
+	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
 	       gl->gl_name.ln_type, gl->gl_name.ln_number,
 	       gfs2_glock2aspace(gl));
-	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
+	gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
 }
 
 /**
@@ -52,7 +54,7 @@
 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
 			     unsigned int nr_revokes)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct list_head *head = &gl->gl_ail_list;
 	struct gfs2_bufdata *bd, *tmp;
 	struct buffer_head *bh;
@@ -80,7 +82,7 @@
 
 static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_trans tr;
 
 	memset(&tr, 0, sizeof(tr));
@@ -109,7 +111,7 @@
 
 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	unsigned int revokes = atomic_read(&gl->gl_ail_count);
 	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
 	int ret;
@@ -139,7 +141,7 @@
 
 static void rgrp_go_sync(struct gfs2_glock *gl)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct address_space *mapping = &sdp->sd_aspace;
 	struct gfs2_rgrpd *rgd;
 	int error;
@@ -179,7 +181,7 @@
 
 static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct address_space *mapping = &sdp->sd_aspace;
 	struct gfs2_rgrpd *rgd = gl->gl_object;
 
@@ -218,7 +220,7 @@
 
 	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
-	gfs2_log_flush(gl->gl_sbd, gl, NORMAL_FLUSH);
+	gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
 	filemap_fdatawrite(metamapping);
 	if (ip) {
 		struct address_space *mapping = ip->i_inode.i_mapping;
@@ -252,7 +254,7 @@
 {
 	struct gfs2_inode *ip = gl->gl_object;
 
-	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
+	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
 
 	if (flags & DIO_METADATA) {
 		struct address_space *mapping = gfs2_glock2aspace(gl);
@@ -264,9 +266,9 @@
 		}
 	}
 
-	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
-		gfs2_log_flush(gl->gl_sbd, NULL, NORMAL_FLUSH);
-		gl->gl_sbd->sd_rindex_uptodate = 0;
+	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
+		gfs2_log_flush(gl->gl_name.ln_sbd, NULL, NORMAL_FLUSH);
+		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
 	}
 	if (ip && S_ISREG(ip->i_inode.i_mode))
 		truncate_inode_pages(ip->i_inode.i_mapping, 0);
@@ -281,7 +283,7 @@
 
 static int inode_go_demote_ok(const struct gfs2_glock *gl)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_holder *gh;
 
 	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
@@ -416,7 +418,7 @@
 static int inode_go_lock(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_inode *ip = gl->gl_object;
 	int error = 0;
 
@@ -477,7 +479,7 @@
 static void freeze_go_sync(struct gfs2_glock *gl)
 {
 	int error = 0;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
 	if (gl->gl_state == LM_ST_SHARED &&
 	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
@@ -500,7 +502,7 @@
 
 static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
 	struct gfs2_glock *j_gl = ip->i_gl;
 	struct gfs2_log_header_host head;
@@ -545,7 +547,7 @@
 static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
 	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
 	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
 		return;
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index a1ec7c2..121ed08 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -22,6 +22,7 @@
 #include <linux/ktime.h>
 #include <linux/percpu.h>
 #include <linux/lockref.h>
+#include <linux/rhashtable.h>
 
 #define DIO_WAIT	0x00000010
 #define DIO_METADATA	0x00000020
@@ -203,13 +204,15 @@
 };
 
 struct lm_lockname {
+	struct gfs2_sbd *ln_sbd;
 	u64 ln_number;
 	unsigned int ln_type;
 };
 
 #define lm_name_equal(name1, name2) \
-        (((name1)->ln_number == (name2)->ln_number) && \
-         ((name1)->ln_type == (name2)->ln_type))
+        (((name1)->ln_number == (name2)->ln_number) &&	\
+	 ((name1)->ln_type == (name2)->ln_type) &&	\
+	 ((name1)->ln_sbd == (name2)->ln_sbd))
 
 
 struct gfs2_glock_operations {
@@ -241,7 +244,7 @@
 };
 
 struct gfs2_lkstats {
-	s64 stats[GFS2_NR_LKSTATS];
+	u64 stats[GFS2_NR_LKSTATS];
 };
 
 enum {
@@ -327,7 +330,6 @@
 
 struct gfs2_glock {
 	struct hlist_bl_node gl_list;
-	struct gfs2_sbd *gl_sbd;
 	unsigned long gl_flags;		/* GLF_... */
 	struct lm_lockname gl_name;
 
@@ -341,7 +343,6 @@
 		     gl_req:2,		/* State in last dlm request */
 		     gl_reply:8;	/* Last reply from the dlm */
 
-	unsigned int gl_hash;
 	unsigned long gl_demote_time; /* time of first demote request */
 	long gl_hold_time;
 	struct list_head gl_holders;
@@ -367,7 +368,7 @@
 			loff_t end;
 		} gl_vm;
 	};
-	struct rcu_head gl_rcu;
+	struct rhash_head gl_node;
 };
 
 #define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */
@@ -835,7 +836,7 @@
 
 static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
 {
-	const struct gfs2_sbd *sdp = gl->gl_sbd;
+	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	preempt_disable();
 	this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
 	preempt_enable();
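The incore.h changes above are the data-structure half of moving the glock hash from fixed-size buckets to a resizable rhashtable: the superblock pointer joins lm_lockname so the whole name can serve as the hash key, and the glock carries an rhash_head instead of gl_hash/gl_rcu. The glock.c half is not in this hunk; a rough sketch of how a table keyed this way is typically declared and searched (the parameters and the find_glock() helper are illustrative, not the gfs2 code):

#include <linux/rhashtable.h>

static struct rhashtable gl_hash_table;

static const struct rhashtable_params ht_parms = {
	.key_len	= sizeof(struct lm_lockname),
	.key_offset	= offsetof(struct gfs2_glock, gl_name),
	.head_offset	= offsetof(struct gfs2_glock, gl_node),
};

/* rhashtable_init(&gl_hash_table, &ht_parms) is called once at init time */

static struct gfs2_glock *find_glock(struct gfs2_sbd *sdp, u64 number,
				     unsigned int type)
{
	struct lm_lockname name = {
		.ln_sbd = sdp,
		.ln_number = number,
		.ln_type = type,
	};

	/* the key now distinguishes glocks of different filesystems too */
	return rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
}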
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 641383a..284c154 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -31,7 +31,7 @@
  *
  * @delta is the difference between the current rtt sample and the
  * running average srtt. We add 1/8 of that to the srtt in order to
- * update the current srtt estimate. The varience estimate is a bit
+ * update the current srtt estimate. The variance estimate is a bit
  * more complicated. We subtract the abs value of the @delta from
  * the current variance estimate and add 1/4 of that to the running
  * total.
@@ -80,7 +80,7 @@
 
 	preempt_disable();
 	rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
-	lks = this_cpu_ptr(gl->gl_sbd->sd_lkstats);
+	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
 	gfs2_update_stats(&gl->gl_stats, index, rtt);		/* Local */
 	gfs2_update_stats(&lks->lkstats[gltype], index, rtt);	/* Global */
 	preempt_enable();
@@ -108,7 +108,7 @@
 	dstamp = gl->gl_dstamp;
 	gl->gl_dstamp = ktime_get_real();
 	irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
-	lks = this_cpu_ptr(gl->gl_sbd->sd_lkstats);
+	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
 	gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);		/* Local */
 	gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt);	/* Global */
 	preempt_enable();
@@ -253,7 +253,7 @@
 static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
 		     unsigned int flags)
 {
-	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
 	int req;
 	u32 lkf;
 	char strname[GDLM_STRNAME_BYTES] = "";
@@ -281,7 +281,7 @@
 
 static void gdlm_put_lock(struct gfs2_glock *gl)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 	int lvb_needs_unlock = 0;
 	int error;
@@ -319,7 +319,7 @@
 
 static void gdlm_cancel(struct gfs2_glock *gl)
 {
-	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
 	dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
 }
 
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 92324ac..d5369a1 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -70,7 +70,7 @@
 static void maybe_release_space(struct gfs2_bufdata *bd)
 {
 	struct gfs2_glock *gl = bd->bd_gl;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_rgrpd *rgd = gl->gl_object;
 	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
@@ -578,7 +578,7 @@
 static void gfs2_meta_sync(struct gfs2_glock *gl)
 {
 	struct address_space *mapping = gfs2_glock2aspace(gl);
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	int error;
 
 	if (mapping == NULL)
@@ -588,7 +588,7 @@
 	error = filemap_fdatawait(mapping);
 
 	if (error)
-		gfs2_io_error(gl->gl_sbd);
+		gfs2_io_error(gl->gl_name.ln_sbd);
 }
 
 static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index b984a6e..0e1d4be 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -114,7 +114,7 @@
 struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
 {
 	struct address_space *mapping = gfs2_glock2aspace(gl);
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct page *page;
 	struct buffer_head *bh;
 	unsigned int shift;
@@ -200,7 +200,7 @@
 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
 		   struct buffer_head **bhp)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct buffer_head *bh;
 
 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
@@ -362,7 +362,7 @@
 
 struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct buffer_head *first_bh, *bh;
 	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
 			  sdp->sd_sb.sb_bsize_shift;
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index ac5d802..8ca1615 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -44,7 +44,7 @@
 {
 	struct inode *inode = mapping->host;
 	if (mapping->a_ops == &gfs2_meta_aops)
-		return (((struct gfs2_glock *)mapping) - 1)->gl_sbd;
+		return (((struct gfs2_glock *)mapping) - 1)->gl_name.ln_sbd;
 	else if (mapping->a_ops == &gfs2_rgrp_aops)
 		return container_of(mapping, struct gfs2_sbd, sd_aspace);
 	else
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 9b61f92..3a31226 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -119,7 +119,7 @@
 
 	while (!list_empty(list)) {
 		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
-		sdp = qd->qd_gl->gl_sbd;
+		sdp = qd->qd_gl->gl_name.ln_sbd;
 
 		list_del(&qd->qd_lru);
 
@@ -302,7 +302,7 @@
 
 static void qd_hold(struct gfs2_quota_data *qd)
 {
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
+	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
 	lockref_get(&qd->qd_lockref);
 }
@@ -367,7 +367,7 @@
 
 static int bh_get(struct gfs2_quota_data *qd)
 {
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
+	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 	unsigned int block, offset;
 	struct buffer_head *bh;
@@ -414,7 +414,7 @@
 
 static void bh_put(struct gfs2_quota_data *qd)
 {
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
+	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 
 	mutex_lock(&sdp->sd_quota_mutex);
 	gfs2_assert(sdp, qd->qd_bh_count);
@@ -486,7 +486,7 @@
 
 static void qd_unlock(struct gfs2_quota_data *qd)
 {
-	gfs2_assert_warn(qd->qd_gl->gl_sbd,
+	gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
 			 test_bit(QDF_LOCKED, &qd->qd_flags));
 	clear_bit(QDF_LOCKED, &qd->qd_flags);
 	bh_put(qd);
@@ -614,7 +614,7 @@
 
 static void do_qc(struct gfs2_quota_data *qd, s64 change)
 {
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
+	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 	s64 x;
@@ -831,7 +831,7 @@
 
 static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 {
-	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
+	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 	struct gfs2_alloc_parms ap = { .aflags = 0, };
 	unsigned int data_blocks, ind_blocks;
@@ -922,7 +922,7 @@
 		gfs2_glock_dq_uninit(&ghs[qx]);
 	mutex_unlock(&ip->i_inode.i_mutex);
 	kfree(ghs);
-	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl, NORMAL_FLUSH);
+	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, NORMAL_FLUSH);
 	return error;
 }
 
@@ -954,7 +954,7 @@
 static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
 		    struct gfs2_holder *q_gh)
 {
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
+	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 	struct gfs2_holder i_gh;
 	int error;
@@ -1037,7 +1037,7 @@
 
 static int need_sync(struct gfs2_quota_data *qd)
 {
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
+	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 	struct gfs2_tune *gt = &sdp->sd_tune;
 	s64 value;
 	unsigned int num, den;
@@ -1125,7 +1125,7 @@
 
 static int print_message(struct gfs2_quota_data *qd, char *type)
 {
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
+	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 
 	fs_info(sdp, "quota %s for %s %u\n",
 		type,
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index c6c6232..475985d 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1860,13 +1860,13 @@
 static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
 {
 	const struct gfs2_glock *gl = rgd->rd_gl;
-	const struct gfs2_sbd *sdp = gl->gl_sbd;
+	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_lkstats *st;
-	s64 r_dcount, l_dcount;
-	s64 l_srttb, a_srttb = 0;
+	u64 r_dcount, l_dcount;
+	u64 l_srttb, a_srttb = 0;
 	s64 srttb_diff;
-	s64 sqr_diff;
-	s64 var;
+	u64 sqr_diff;
+	u64 var;
 	int cpu, nonzero = 0;
 
 	preempt_disable();
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 20c007d..49ac55d 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -104,7 +104,7 @@
 	),
 
 	TP_fast_assign(
-		__entry->dev		= gl->gl_sbd->sd_vfs->s_dev;
+		__entry->dev		= gl->gl_name.ln_sbd->sd_vfs->s_dev;
 		__entry->glnum		= gl->gl_name.ln_number;
 		__entry->gltype		= gl->gl_name.ln_type;
 		__entry->cur_state	= glock_trace_state(gl->gl_state);
@@ -140,7 +140,7 @@
 	),
 
 	TP_fast_assign(
-		__entry->dev		= gl->gl_sbd->sd_vfs->s_dev;
+		__entry->dev		= gl->gl_name.ln_sbd->sd_vfs->s_dev;
 		__entry->gltype		= gl->gl_name.ln_type;
 		__entry->glnum		= gl->gl_name.ln_number;
 		__entry->cur_state	= glock_trace_state(gl->gl_state);
@@ -174,7 +174,7 @@
 	),
 
 	TP_fast_assign(
-		__entry->dev		= gl->gl_sbd->sd_vfs->s_dev;
+		__entry->dev		= gl->gl_name.ln_sbd->sd_vfs->s_dev;
 		__entry->gltype		= gl->gl_name.ln_type;
 		__entry->glnum		= gl->gl_name.ln_number;
 		__entry->cur_state	= glock_trace_state(gl->gl_state);
@@ -209,7 +209,7 @@
 	),
 
 	TP_fast_assign(
-		__entry->dev	= gh->gh_gl->gl_sbd->sd_vfs->s_dev;
+		__entry->dev	= gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
 		__entry->glnum	= gh->gh_gl->gl_name.ln_number;
 		__entry->gltype	= gh->gh_gl->gl_name.ln_type;
 		__entry->first	= first;
@@ -239,7 +239,7 @@
 	),
 
 	TP_fast_assign(
-		__entry->dev	= gh->gh_gl->gl_sbd->sd_vfs->s_dev;
+		__entry->dev	= gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
 		__entry->glnum	= gh->gh_gl->gl_name.ln_number;
 		__entry->gltype	= gh->gh_gl->gl_name.ln_type;
 		__entry->queue	= queue;
@@ -267,18 +267,18 @@
 		__field(	int,	status		)
 		__field(	char,	flags		)
 		__field(	s64,	tdiff		)
-		__field(	s64,	srtt		)
-		__field(	s64,	srttvar		)
-		__field(	s64,	srttb		)
-		__field(	s64,	srttvarb	)
-		__field(	s64,	sirt		)
-		__field(	s64,	sirtvar		)
-		__field(	s64,	dcount		)
-		__field(	s64,	qcount		)
+		__field(	u64,	srtt		)
+		__field(	u64,	srttvar		)
+		__field(	u64,	srttb		)
+		__field(	u64,	srttvarb	)
+		__field(	u64,	sirt		)
+		__field(	u64,	sirtvar		)
+		__field(	u64,	dcount		)
+		__field(	u64,	qcount		)
 	),
 
 	TP_fast_assign(
-		__entry->dev            = gl->gl_sbd->sd_vfs->s_dev;
+		__entry->dev            = gl->gl_name.ln_sbd->sd_vfs->s_dev;
 		__entry->glnum          = gl->gl_name.ln_number;
 		__entry->gltype         = gl->gl_name.ln_type;
 		__entry->status		= gl->gl_lksb.sb_status;
@@ -333,7 +333,7 @@
 	),
 
 	TP_fast_assign(
-		__entry->dev		= bd->bd_gl->gl_sbd->sd_vfs->s_dev;
+		__entry->dev		= bd->bd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
 		__entry->pin		= pin;
 		__entry->len		= bd->bd_bh->b_size;
 		__entry->block		= bd->bd_bh->b_blocknr;
@@ -449,7 +449,7 @@
 	),
 
 	TP_fast_assign(
-		__entry->dev            = ip->i_gl->gl_sbd->sd_vfs->s_dev;
+		__entry->dev            = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
 		__entry->lblock		= lblock;
 		__entry->pblock		= buffer_mapped(bh) ?  bh->b_blocknr : 0;
 		__entry->inum		= ip->i_no_addr;
@@ -489,7 +489,7 @@
 	),
 
 	TP_fast_assign(
-		__entry->dev		= rgd->rd_gl->gl_sbd->sd_vfs->s_dev;
+		__entry->dev		= rgd->rd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
 		__entry->start		= block;
 		__entry->inum		= ip->i_no_addr;
 		__entry->len		= len;
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 88bff24..b95d0d6 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -158,7 +158,7 @@
 void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
 {
 	struct gfs2_trans *tr = current->journal_info;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct address_space *mapping = bh->b_page->mapping;
 	struct gfs2_inode *ip = GFS2_I(mapping->host);
 	struct gfs2_bufdata *bd;
@@ -224,7 +224,7 @@
 void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
 {
 
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_bufdata *bd;
 
 	lock_buffer(bh);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 2714ef8..be806ea 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -113,7 +113,8 @@
 	return status;
 }
 
-static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
+static int nfs_delegation_claim_opens(struct inode *inode,
+		const nfs4_stateid *stateid, fmode_t type)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_open_context *ctx;
@@ -140,7 +141,7 @@
 		/* Block nfs4_proc_unlck */
 		mutex_lock(&sp->so_delegreturn_mutex);
 		seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
-		err = nfs4_open_delegation_recall(ctx, state, stateid);
+		err = nfs4_open_delegation_recall(ctx, state, stateid, type);
 		if (!err)
 			err = nfs_delegation_claim_locks(ctx, state, stateid);
 		if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
@@ -411,7 +412,8 @@
 	do {
 		if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
 			break;
-		err = nfs_delegation_claim_opens(inode, &delegation->stateid);
+		err = nfs_delegation_claim_opens(inode, &delegation->stateid,
+				delegation->type);
 		if (!issync || err != -EAGAIN)
 			break;
 		/*
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index a448291..333063e 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -54,7 +54,7 @@
 
 /* NFSv4 delegation-related procedures */
 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
-int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
+int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
 bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags);
 
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 38678d9..4b1d08f 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -166,8 +166,11 @@
 	struct nfs_writeverf *verfp = &dreq->verf;
 
 #ifdef CONFIG_NFS_V4_1
-	if (ds_clp) {
-		/* pNFS is in use, use the DS verf */
+	/*
+	 * pNFS is in use, use the DS verf except commit_through_mds is set
+	 * for layout segment where nbuckets is zero.
+	 */
+	if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
 		if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
 			verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
 		else
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index b34f2e2..02ec079 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -629,23 +629,18 @@
 	goto out;
 }
 
-static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl)
+static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
 {
 	int i;
 
-	for (i = 0; i < fl->num_fh; i++) {
-		if (!fl->fh_array[i])
-			break;
-		kfree(fl->fh_array[i]);
+	if (fl->fh_array) {
+		for (i = 0; i < fl->num_fh; i++) {
+			if (!fl->fh_array[i])
+				break;
+			kfree(fl->fh_array[i]);
+		}
+		kfree(fl->fh_array);
 	}
-	kfree(fl->fh_array);
-	fl->fh_array = NULL;
-}
-
-static void
-_filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
-{
-	filelayout_free_fh_array(fl);
 	kfree(fl);
 }
 
@@ -716,21 +711,21 @@
 		/* Do we want to use a mempool here? */
 		fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
 		if (!fl->fh_array[i])
-			goto out_err_free;
+			goto out_err;
 
 		p = xdr_inline_decode(&stream, 4);
 		if (unlikely(!p))
-			goto out_err_free;
+			goto out_err;
 		fl->fh_array[i]->size = be32_to_cpup(p++);
 		if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
 			printk(KERN_ERR "NFS: Too big fh %d received %d\n",
 			       i, fl->fh_array[i]->size);
-			goto out_err_free;
+			goto out_err;
 		}
 
 		p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
 		if (unlikely(!p))
-			goto out_err_free;
+			goto out_err;
 		memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
 		dprintk("DEBUG: %s: fh len %d\n", __func__,
 			fl->fh_array[i]->size);
@@ -739,8 +734,6 @@
 	__free_page(scratch);
 	return 0;
 
-out_err_free:
-	filelayout_free_fh_array(fl);
 out_err:
 	__free_page(scratch);
 	return -EIO;
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index d731bbf..0f020e4 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -175,10 +175,12 @@
 {
 	struct nfs_server *server = NFS_SERVER(file_inode(filep));
 	struct nfs4_exception exception = { };
-	int err;
+	loff_t err;
 
 	do {
 		err = _nfs42_proc_llseek(filep, offset, whence);
+		if (err >= 0)
+			break;
 		if (err == -ENOTSUPP)
 			return -EOPNOTSUPP;
 		err = nfs4_handle_exception(server, err, &exception);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 693b903..f93b9cd 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1127,6 +1127,21 @@
 	return ret;
 }
 
+static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
+		fmode_t fmode)
+{
+	switch(fmode & (FMODE_READ|FMODE_WRITE)) {
+	case FMODE_READ|FMODE_WRITE:
+		return state->n_rdwr != 0;
+	case FMODE_WRITE:
+		return state->n_wronly != 0;
+	case FMODE_READ:
+		return state->n_rdonly != 0;
+	}
+	WARN_ON_ONCE(1);
+	return false;
+}
+
 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
 {
 	int ret = 0;
@@ -1571,17 +1586,13 @@
 	return opendata;
 }
 
-static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
+static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
+		fmode_t fmode)
 {
 	struct nfs4_state *newstate;
 	int ret;
 
-	if ((opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
-	     opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEG_CUR_FH) &&
-	    (opendata->o_arg.u.delegation_type & fmode) != fmode)
-		/* This mode can't have been delegated, so we must have
-		 * a valid open_stateid to cover it - not need to reclaim.
-		 */
+	if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
 		return 0;
 	opendata->o_arg.open_flags = 0;
 	opendata->o_arg.fmode = fmode;
@@ -1597,14 +1608,14 @@
 	newstate = nfs4_opendata_to_nfs4_state(opendata);
 	if (IS_ERR(newstate))
 		return PTR_ERR(newstate);
+	if (newstate != opendata->state)
+		ret = -ESTALE;
 	nfs4_close_state(newstate, fmode);
-	*res = newstate;
-	return 0;
+	return ret;
 }
 
 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
 {
-	struct nfs4_state *newstate;
 	int ret;
 
 	/* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
@@ -1615,27 +1626,15 @@
 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
 	clear_bit(NFS_OPEN_STATE, &state->flags);
 	smp_rmb();
-	if (state->n_rdwr != 0) {
-		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
-		if (ret != 0)
-			return ret;
-		if (newstate != state)
-			return -ESTALE;
-	}
-	if (state->n_wronly != 0) {
-		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
-		if (ret != 0)
-			return ret;
-		if (newstate != state)
-			return -ESTALE;
-	}
-	if (state->n_rdonly != 0) {
-		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
-		if (ret != 0)
-			return ret;
-		if (newstate != state)
-			return -ESTALE;
-	}
+	ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
+	if (ret != 0)
+		return ret;
+	ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
+	if (ret != 0)
+		return ret;
+	ret = nfs4_open_recover_helper(opendata, FMODE_READ);
+	if (ret != 0)
+		return ret;
 	/*
 	 * We may have performed cached opens for all three recoveries.
 	 * Check if we need to update the current stateid.
@@ -1759,18 +1758,32 @@
 	return err;
 }
 
-int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
+int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
+		struct nfs4_state *state, const nfs4_stateid *stateid,
+		fmode_t type)
 {
 	struct nfs_server *server = NFS_SERVER(state->inode);
 	struct nfs4_opendata *opendata;
-	int err;
+	int err = 0;
 
 	opendata = nfs4_open_recoverdata_alloc(ctx, state,
 			NFS4_OPEN_CLAIM_DELEG_CUR_FH);
 	if (IS_ERR(opendata))
 		return PTR_ERR(opendata);
 	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
-	err = nfs4_open_recover(opendata, state);
+	clear_bit(NFS_DELEGATED_STATE, &state->flags);
+	switch (type & (FMODE_READ|FMODE_WRITE)) {
+	case FMODE_READ|FMODE_WRITE:
+	case FMODE_WRITE:
+		err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
+		if (err)
+			break;
+		err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
+		if (err)
+			break;
+	case FMODE_READ:
+		err = nfs4_open_recover_helper(opendata, FMODE_READ);
+	}
 	nfs4_opendata_put(opendata);
 	return nfs4_handle_delegation_recall_error(server, state, stateid, err);
 }
@@ -2645,6 +2658,15 @@
 	return err;
 }
 
+static bool
+nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
+{
+	if (inode == NULL || !nfs_have_layout(inode))
+		return false;
+
+	return pnfs_wait_on_layoutreturn(inode, task);
+}
+
 struct nfs4_closedata {
 	struct inode *inode;
 	struct nfs4_state *state;
@@ -2763,6 +2785,11 @@
 		goto out_no_action;
 	}
 
+	if (nfs4_wait_on_layoutreturn(inode, task)) {
+		nfs_release_seqid(calldata->arg.seqid);
+		goto out_wait;
+	}
+
 	if (calldata->arg.fmode == 0)
 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
 	if (calldata->roc)
@@ -5308,6 +5335,9 @@
 
 	d_data = (struct nfs4_delegreturndata *)data;
 
+	if (nfs4_wait_on_layoutreturn(d_data->inode, task))
+		return;
+
 	if (d_data->roc)
 		pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier);
 
@@ -7800,39 +7830,46 @@
 			dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
 				__func__, delay);
 			rpc_delay(task, delay);
-			task->tk_status = 0;
-			rpc_restart_call_prepare(task);
-			goto out; /* Do not call nfs4_async_handle_error() */
+			/* Do not call nfs4_async_handle_error() */
+			goto out_restart;
 		}
 		break;
 	case -NFS4ERR_EXPIRED:
 	case -NFS4ERR_BAD_STATEID:
 		spin_lock(&inode->i_lock);
-		lo = NFS_I(inode)->layout;
-		if (!lo || list_empty(&lo->plh_segs)) {
+		if (nfs4_stateid_match(&lgp->args.stateid,
+					&lgp->args.ctx->state->stateid)) {
 			spin_unlock(&inode->i_lock);
 			/* If the open stateid was bad, then recover it. */
 			state = lgp->args.ctx->state;
-		} else {
+			break;
+		}
+		lo = NFS_I(inode)->layout;
+		if (lo && nfs4_stateid_match(&lgp->args.stateid,
+					&lo->plh_stateid)) {
 			LIST_HEAD(head);
 
 			/*
 			 * Mark the bad layout state as invalid, then retry
 			 * with the current stateid.
 			 */
+			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
 			pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
 			spin_unlock(&inode->i_lock);
 			pnfs_free_lseg_list(&head);
-	
-			task->tk_status = 0;
-			rpc_restart_call_prepare(task);
-		}
+		} else
+			spin_unlock(&inode->i_lock);
+		goto out_restart;
 	}
 	if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
-		rpc_restart_call_prepare(task);
+		goto out_restart;
 out:
 	dprintk("<-- %s\n", __func__);
 	return;
+out_restart:
+	task->tk_status = 0;
+	rpc_restart_call_prepare(task);
+	return;
 out_overflow:
 	task->tk_status = -EOVERFLOW;
 	goto out;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index da73bc4..5db3246 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1481,7 +1481,7 @@
 					spin_unlock(&state->state_lock);
 				}
 				nfs4_put_open_state(state);
-				clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
+				clear_bit(NFS_STATE_RECLAIM_NOGRACE,
 					&state->flags);
 				spin_lock(&sp->so_lock);
 				goto restart;
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 7c5718b..fe3ddd2 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -508,7 +508,7 @@
 	 * for it without upsetting the slab allocator.
 	 */
 	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
-			sizeof(struct page) > PAGE_SIZE)
+			sizeof(struct page *) > PAGE_SIZE)
 		return 0;
 
 	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index ba12464..8abe271 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1104,20 +1104,15 @@
 			mark_lseg_invalid(lseg, &tmp_list);
 			found = true;
 		}
-	/* pnfs_prepare_layoutreturn() grabs lo ref and it will be put
-	 * in pnfs_roc_release(). We don't really send a layoutreturn but
-	 * still want others to view us like we are sending one!
-	 *
-	 * If pnfs_prepare_layoutreturn() fails, it means someone else is doing
-	 * LAYOUTRETURN, so we proceed like there are no layouts to return.
-	 *
-	 * ROC in three conditions:
+	/* ROC in two conditions:
 	 * 1. there are ROC lsegs
 	 * 2. we don't send layoutreturn
-	 * 3. no others are sending layoutreturn
 	 */
-	if (found && !layoutreturn && pnfs_prepare_layoutreturn(lo))
+	if (found && !layoutreturn) {
+		/* lo ref dropped in pnfs_roc_release() */
+		pnfs_get_layout_hdr(lo);
 		roc = true;
+	}
 
 out_noroc:
 	spin_unlock(&ino->i_lock);
@@ -1172,6 +1167,26 @@
 	spin_unlock(&ino->i_lock);
 }
 
+bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
+{
+	struct nfs_inode *nfsi = NFS_I(ino);
+	struct pnfs_layout_hdr *lo;
+	bool sleep = false;
+
+	/* we might not have grabbed lo reference. so need to check under
+	 * i_lock */
+	spin_lock(&ino->i_lock);
+	lo = nfsi->layout;
+	if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+		sleep = true;
+	spin_unlock(&ino->i_lock);
+
+	if (sleep)
+		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
+
+	return sleep;
+}
+
 /*
  * Compare two layout segments for sorting into layout cache.
  * We want to preferentially return RW over RO layouts, so ensure those
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 78c9351..d1990e9 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -270,6 +270,7 @@
 void pnfs_roc_release(struct inode *ino);
 void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
 void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier);
+bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task);
 void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t);
 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
 int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
@@ -639,6 +640,12 @@
 {
 }
 
+static inline bool
+pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
+{
+	return false;
+}
+
 static inline void set_pnfs_layoutdriver(struct nfs_server *s,
 					 const struct nfs_fh *mntfh, u32 id)
 {
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index ae0ff7a..01b8cc8 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -72,6 +72,9 @@
 {
 	struct nfs_pgio_mirror *mirror;
 
+	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
+		pgio->pg_ops->pg_cleanup(pgio);
+
 	pgio->pg_ops = &nfs_pgio_rw_ops;
 
 	/* read path should never have more than one mirror */
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 388f480..72624dc 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1351,6 +1351,9 @@
 {
 	struct nfs_pgio_mirror *mirror;
 
+	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
+		pgio->pg_ops->pg_cleanup(pgio);
+
 	pgio->pg_ops = &nfs_pgio_rw_ops;
 
 	nfs_pageio_stop_mirroring(pgio);
diff --git a/fs/nsfs.c b/fs/nsfs.c
index e4905fb..8f20d60 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -142,7 +142,8 @@
 	struct inode *inode = d_inode(dentry);
 	const struct proc_ns_operations *ns_ops = dentry->d_fsdata;
 
-	return seq_printf(seq, "%s:[%lu]", ns_ops->name, inode->i_ino);
+	seq_printf(seq, "%s:[%lu]", ns_ops->name, inode->i_ino);
+	return 0;
 }
 
 static const struct super_operations nsfs_ops = {
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 46b8b2b..ee5aa4d 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1439,6 +1439,7 @@
 	int found, ret;
 	int set_maybe;
 	int dispatch_assert = 0;
+	int dispatched = 0;
 
 	if (!dlm_grab(dlm))
 		return DLM_MASTER_RESP_NO;
@@ -1658,15 +1659,18 @@
 			mlog(ML_ERROR, "failed to dispatch assert master work\n");
 			response = DLM_MASTER_RESP_ERROR;
 			dlm_lockres_put(res);
-		} else
+		} else {
+			dispatched = 1;
 			__dlm_lockres_grab_inflight_worker(dlm, res);
+		}
 		spin_unlock(&res->spinlock);
 	} else {
 		if (res)
 			dlm_lockres_put(res);
 	}
 
-	dlm_put(dlm);
+	if (!dispatched)
+		dlm_put(dlm);
 	return response;
 }
 
@@ -2090,7 +2094,6 @@
 
 
 	/* queue up work for dlm_assert_master_worker */
-	dlm_grab(dlm);  /* get an extra ref for the work item */
 	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
 	item->u.am.lockres = res; /* already have a ref */
 	/* can optionally ignore node numbers higher than this node */
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index d0e436d..3d90ad7 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1694,6 +1694,7 @@
 	unsigned int hash;
 	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
 	u32 flags = DLM_ASSERT_MASTER_REQUERY;
+	int dispatched = 0;
 
 	if (!dlm_grab(dlm)) {
 		/* since the domain has gone away on this
@@ -1719,8 +1720,10 @@
 				dlm_put(dlm);
 				/* sender will take care of this and retry */
 				return ret;
-			} else
+			} else {
+				dispatched = 1;
 				__dlm_lockres_grab_inflight_worker(dlm, res);
+			}
 			spin_unlock(&res->spinlock);
 		} else {
 			/* put.. incase we are not the master */
@@ -1730,7 +1733,8 @@
 	}
 	spin_unlock(&dlm->spinlock);
 
-	dlm_put(dlm);
+	if (!dispatched)
+		dlm_put(dlm);
 	return master;
 }
 
@@ -1776,7 +1780,7 @@
 				     struct dlm_migratable_lockres *mres)
 {
 	struct dlm_migratable_lock *ml;
-	struct list_head *queue;
+	struct list_head *queue, *iter;
 	struct list_head *tmpq = NULL;
 	struct dlm_lock *newlock = NULL;
 	struct dlm_lockstatus *lksb = NULL;
@@ -1821,7 +1825,9 @@
 			spin_lock(&res->spinlock);
 			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
 				tmpq = dlm_list_idx_to_ptr(res, j);
-				list_for_each_entry(lock, tmpq, list) {
+				list_for_each(iter, tmpq) {
+					lock = list_entry(iter,
+						  struct dlm_lock, list);
 					if (lock->ml.cookie == ml->cookie)
 						break;
 					lock = NULL;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 263b125..225586e 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -372,16 +372,16 @@
  *	@esc:	set of characters that need escaping
  *
  *	Puts string into buffer, replacing each occurrence of character from
- *	@esc with usual octal escape.  Returns 0 in case of success, -1 - in
- *	case of overflow.
+ *	@esc with usual octal escape.
+ *	Use seq_has_overflowed() to check for errors.
  */
-int seq_escape(struct seq_file *m, const char *s, const char *esc)
+void seq_escape(struct seq_file *m, const char *s, const char *esc)
 {
 	char *end = m->buf + m->size;
-        char *p;
+	char *p;
 	char c;
 
-        for (p = m->buf + m->count; (c = *s) != '\0' && p < end; s++) {
+	for (p = m->buf + m->count; (c = *s) != '\0' && p < end; s++) {
 		if (!strchr(esc, c)) {
 			*p++ = c;
 			continue;
@@ -394,14 +394,13 @@
 			continue;
 		}
 		seq_set_overflow(m);
-		return -1;
-        }
+		return;
+	}
 	m->count = p - m->buf;
-        return 0;
 }
 EXPORT_SYMBOL(seq_escape);
 
-int seq_vprintf(struct seq_file *m, const char *f, va_list args)
+void seq_vprintf(struct seq_file *m, const char *f, va_list args)
 {
 	int len;
 
@@ -409,24 +408,20 @@
 		len = vsnprintf(m->buf + m->count, m->size - m->count, f, args);
 		if (m->count + len < m->size) {
 			m->count += len;
-			return 0;
+			return;
 		}
 	}
 	seq_set_overflow(m);
-	return -1;
 }
 EXPORT_SYMBOL(seq_vprintf);
 
-int seq_printf(struct seq_file *m, const char *f, ...)
+void seq_printf(struct seq_file *m, const char *f, ...)
 {
-	int ret;
 	va_list args;
 
 	va_start(args, f);
-	ret = seq_vprintf(m, f, args);
+	seq_vprintf(m, f, args);
 	va_end(args);
-
-	return ret;
 }
 EXPORT_SYMBOL(seq_printf);
 
@@ -664,26 +659,25 @@
 }
 EXPORT_SYMBOL(seq_open_private);
 
-int seq_putc(struct seq_file *m, char c)
+void seq_putc(struct seq_file *m, char c)
 {
-	if (m->count < m->size) {
-		m->buf[m->count++] = c;
-		return 0;
-	}
-	return -1;
+	if (m->count >= m->size)
+		return;
+
+	m->buf[m->count++] = c;
 }
 EXPORT_SYMBOL(seq_putc);
 
-int seq_puts(struct seq_file *m, const char *s)
+void seq_puts(struct seq_file *m, const char *s)
 {
 	int len = strlen(s);
-	if (m->count + len < m->size) {
-		memcpy(m->buf + m->count, s, len);
-		m->count += len;
-		return 0;
+
+	if (m->count + len >= m->size) {
+		seq_set_overflow(m);
+		return;
 	}
-	seq_set_overflow(m);
-	return -1;
+	memcpy(m->buf + m->count, s, len);
+	m->count += len;
 }
 EXPORT_SYMBOL(seq_puts);
 
@@ -694,8 +688,8 @@
  * This routine is very quick when you show lots of numbers.
  * In usual cases, it will be better to use seq_printf(). It's easier to read.
  */
-int seq_put_decimal_ull(struct seq_file *m, char delimiter,
-			unsigned long long num)
+void seq_put_decimal_ull(struct seq_file *m, char delimiter,
+			 unsigned long long num)
 {
 	int len;
 
@@ -707,35 +701,33 @@
 
 	if (num < 10) {
 		m->buf[m->count++] = num + '0';
-		return 0;
+		return;
 	}
 
 	len = num_to_str(m->buf + m->count, m->size - m->count, num);
 	if (!len)
 		goto overflow;
 	m->count += len;
-	return 0;
+	return;
+
 overflow:
 	seq_set_overflow(m);
-	return -1;
 }
 EXPORT_SYMBOL(seq_put_decimal_ull);
 
-int seq_put_decimal_ll(struct seq_file *m, char delimiter,
-			long long num)
+void seq_put_decimal_ll(struct seq_file *m, char delimiter, long long num)
 {
 	if (num < 0) {
 		if (m->count + 3 >= m->size) {
 			seq_set_overflow(m);
-			return -1;
+			return;
 		}
 		if (delimiter)
 			m->buf[m->count++] = delimiter;
 		num = -num;
 		delimiter = '-';
 	}
-	return seq_put_decimal_ull(m, delimiter, num);
-
+	seq_put_decimal_ull(m, delimiter, num);
 }
 EXPORT_SYMBOL(seq_put_decimal_ll);
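With seq_escape(), seq_printf(), seq_puts(), seq_putc() and the decimal helpers now returning void, callers can no longer propagate a -1 overflow result; they either ignore overflow (the seq_file core notices it, grows the buffer and re-runs ->show()) or test seq_has_overflowed() explicitly. A small sketch of a ->show() handler written against the new API (my_record and its fields are made up):

#include <linux/seq_file.h>

struct my_record {
	const char *name;
	unsigned long long count;
};

static int my_show(struct seq_file *m, void *v)
{
	struct my_record *r = v;

	seq_printf(m, "%s", r->name);
	seq_put_decimal_ull(m, ' ', r->count);
	seq_putc(m, '\n');

	/* nothing to check here: on overflow the seq_file core reallocates
	 * the buffer and calls ->show() again for this record */
	return 0;
}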
 
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 96f3448..fd65b3f 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -652,11 +652,8 @@
 {
 	int err;
 
-	mutex_lock(&inode->i_mutex);
 	err = security_inode_init_security(inode, dentry, qstr,
 					   &init_xattrs, 0);
-	mutex_unlock(&inode->i_mutex);
-
 	if (err) {
 		struct ubifs_info *c = dentry->i_sb->s_fs_info;
 		ubifs_err(c, "cannot initialize security for inode %lu, error %d",
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 634e676..5031170 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -467,8 +467,8 @@
 	 * the fault_*wqh.
 	 */
 	spin_lock(&ctx->fault_pending_wqh.lock);
-	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0, &range);
-	__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, &range);
+	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
+	__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
 	spin_unlock(&ctx->fault_pending_wqh.lock);
 
 	wake_up_poll(&ctx->fd_wqh, POLLHUP);
@@ -650,10 +650,10 @@
 	spin_lock(&ctx->fault_pending_wqh.lock);
 	/* wake all in the range and autoremove */
 	if (waitqueue_active(&ctx->fault_pending_wqh))
-		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0,
+		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
 				     range);
 	if (waitqueue_active(&ctx->fault_wqh))
-		__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, range);
+		__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range);
 	spin_unlock(&ctx->fault_pending_wqh.lock);
 }
 
@@ -1287,8 +1287,10 @@
 
 	file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
 				  O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
-	if (IS_ERR(file))
+	if (IS_ERR(file)) {
+		mmput(ctx->mm);
 		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
+	}
 out:
 	return file;
 }
diff --git a/include/acpi/button.h b/include/acpi/button.h
index 97eea0e..1cad8b2 100644
--- a/include/acpi/button.h
+++ b/include/acpi/button.h
@@ -3,7 +3,7 @@
 
 #include <linux/notifier.h>
 
-#if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE)
+#if IS_ENABLED(CONFIG_ACPI_BUTTON)
 extern int acpi_lid_notifier_register(struct notifier_block *nb);
 extern int acpi_lid_notifier_unregister(struct notifier_block *nb);
 extern int acpi_lid_open(void);
@@ -20,6 +20,6 @@
 {
 	return 1;
 }
-#endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */
+#endif /* IS_ENABLED(CONFIG_ACPI_BUTTON) */
 
 #endif /* ACPI_BUTTON_H */
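IS_ENABLED(CONFIG_FOO) from <linux/kconfig.h> is true when the option is built in (=y) or modular (=m), so it is a direct replacement for the old defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) pattern and also works in ordinary C expressions. A tiny illustration (CONFIG_EXAMPLE_DRIVER is a made-up symbol):

#include <linux/errno.h>
#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_EXAMPLE_DRIVER)		/* =y or =m */
extern int example_driver_probe(void);
#else
static inline int example_driver_probe(void) { return -ENODEV; }
#endif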
diff --git a/include/acpi/video.h b/include/acpi/video.h
index e840b29..c62392d 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -24,7 +24,7 @@
 	acpi_backlight_native,
 };
 
-#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
+#if IS_ENABLED(CONFIG_ACPI_VIDEO)
 extern int acpi_video_register(void);
 extern void acpi_video_unregister(void);
 extern int acpi_video_get_edid(struct acpi_device *device, int type,
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index f20f407..4b4b056 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -73,7 +73,7 @@
  * Convert a physical address to a Page Frame Number and back
  */
 #define	__phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
-#define	__pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
+#define	__pfn_to_phys(pfn)	PFN_PHYS(pfn)
 
 #define page_to_pfn __page_to_pfn
 #define pfn_to_page __pfn_to_page
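The point of routing __pfn_to_phys() through PFN_PHYS() is the cast: PFN_PHYS() widens the pfn to phys_addr_t before shifting, so on 32-bit kernels with >32-bit physical addressing (e.g. ARM LPAE) the high bits are not shifted out. Roughly, from <linux/pfn.h>:

#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)

/* the old open-coded form shifted the (unsigned long) pfn directly,
 * truncating addresses above 4GB on 32-bit:
 *	(pfn) << PAGE_SHIFT
 */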
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 83bfb87..e2aadbc 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,8 +111,8 @@
 		cpu_relax();
 }
 
-#ifndef virt_queued_spin_lock
-static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifndef virt_spin_lock
+static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
 	return false;
 }
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index d901f1a..4e14dac 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -35,11 +35,7 @@
 #define VGIC_V3_MAX_LRS		16
 #define VGIC_MAX_IRQS		1024
 #define VGIC_V2_MAX_CPUS	8
-
-/* Sanity checks... */
-#if (KVM_MAX_VCPUS > 255)
-#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
-#endif
+#define VGIC_V3_MAX_CPUS	255
 
 #if (VGIC_NR_IRQS_LEGACY & 31)
 #error "VGIC_NR_IRQS must be a multiple of 32"
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 7235c48..43856d1 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -217,6 +217,7 @@
 
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
+bool acpi_isa_irq_available(int irq);
 void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);
 
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5a5d79e..d5eb4ad1 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,7 @@
 #include <linux/sched.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
+#include <linux/memcontrol.h>
 #include <linux/blk-cgroup.h>
 #include <linux/backing-dev-defs.h>
 #include <linux/slab.h>
@@ -252,13 +253,19 @@
  * @inode: inode of interest
  *
  * cgroup writeback requires support from both the bdi and filesystem.
- * Test whether @inode has both.
+ * Also, both memcg and iocg have to be on the default hierarchy.  Test
+ * whether all conditions are met.
+ *
+ * Note that the test result may change dynamically on the same inode
+ * depending on how memcg and iocg are configured.
  */
 static inline bool inode_cgwb_enabled(struct inode *inode)
 {
 	struct backing_dev_info *bdi = inode_to_bdi(inode);
 
-	return bdi_cap_account_dirty(bdi) &&
+	return cgroup_on_dfl(mem_cgroup_root_css->cgroup) &&
+		cgroup_on_dfl(blkcg_root_css->cgroup) &&
+		bdi_cap_account_dirty(bdi) &&
 		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
 		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
 }
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 708923b9..99da9eb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -584,7 +584,7 @@
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
-#define rq_data_dir(rq)		(((rq)->cmd_flags & 1) != 0)
+#define rq_data_dir(rq)		((int)((rq)->cmd_flags & 1))
 
 /*
  * Driver can handle struct request, if it either has an old style
@@ -1368,6 +1368,26 @@
 		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
 }
 
+static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
+			 struct bio *next)
+{
+	if (!bio_has_data(prev))
+		return false;
+
+	return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
+				next->bi_io_vec[0].bv_offset);
+}
+
+static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
+{
+	return bio_will_gap(req->q, req->biotail, bio);
+}
+
+static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
+{
+	return bio_will_gap(req->q, bio, req->bio);
+}
+
 struct work_struct;
 int kblockd_schedule_work(struct work_struct *work);
 int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
@@ -1494,6 +1514,26 @@
 	return q->limits.max_integrity_segments;
 }
 
+static inline bool integrity_req_gap_back_merge(struct request *req,
+						struct bio *next)
+{
+	struct bio_integrity_payload *bip = bio_integrity(req->bio);
+	struct bio_integrity_payload *bip_next = bio_integrity(next);
+
+	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+				bip_next->bip_vec[0].bv_offset);
+}
+
+static inline bool integrity_req_gap_front_merge(struct request *req,
+						 struct bio *bio)
+{
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+
+	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+				bip_next->bip_vec[0].bv_offset);
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 struct bio;
@@ -1560,6 +1600,16 @@
 {
 	return 0;
 }
+static inline bool integrity_req_gap_back_merge(struct request *req,
+						struct bio *next)
+{
+	return false;
+}
+static inline bool integrity_req_gap_front_merge(struct request *req,
+						 struct bio *bio)
+{
+	return false;
+}
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
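The new req_gap_back_merge()/req_gap_front_merge() helpers, plus the integrity variants (with no-op stubs when CONFIG_BLK_DEV_INTEGRITY is off), centralize the virt-boundary gap test that decides whether a bio may be merged into an existing request. A hedged sketch of how a merge path could use them; the real callers live in block/blk-merge.c and may differ:

#include <linux/blkdev.h>

static bool my_can_back_merge(struct request *req, struct bio *bio)
{
	/* the data must not straddle the queue's virt boundary ... */
	if (req_gap_back_merge(req, bio))
		return false;
	/* ... and neither may the integrity metadata, if the request has any */
	if (blk_integrity_rq(req) && integrity_req_gap_back_merge(req, bio))
		return false;
	return true;
}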
 
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f57d7fe..e3a51b7 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,7 +10,6 @@
 #include <uapi/linux/bpf.h>
 #include <linux/workqueue.h>
 #include <linux/file.h>
-#include <linux/perf_event.h>
 
 struct bpf_map;
 
@@ -37,6 +36,8 @@
 	u32 key_size;
 	u32 value_size;
 	u32 max_entries;
+	u32 pages;
+	struct user_struct *user;
 	const struct bpf_map_ops *ops;
 	struct work_struct work;
 };
@@ -101,6 +102,8 @@
 	BPF_WRITE = 2
 };
 
+struct bpf_prog;
+
 struct bpf_verifier_ops {
 	/* return eBPF function prototype for verification */
 	const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
@@ -112,7 +115,7 @@
 
 	u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
 				  int src_reg, int ctx_off,
-				  struct bpf_insn *insn);
+				  struct bpf_insn *insn, struct bpf_prog *prog);
 };
 
 struct bpf_prog_type_list {
@@ -121,14 +124,13 @@
 	enum bpf_prog_type type;
 };
 
-struct bpf_prog;
-
 struct bpf_prog_aux {
 	atomic_t refcnt;
 	u32 used_map_cnt;
 	const struct bpf_verifier_ops *ops;
 	struct bpf_map **used_maps;
 	struct bpf_prog *prog;
+	struct user_struct *user;
 	union {
 		struct work_struct work;
 		struct rcu_head	rcu;
@@ -168,6 +170,8 @@
 struct bpf_map *bpf_map_get(struct fd f);
 void bpf_map_put(struct bpf_map *map);
 
+extern int sysctl_unprivileged_bpf_disabled;
+
 /* verify correctness of eBPF program */
 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
 #else
@@ -201,4 +205,8 @@
 extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
 extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
 
+/* Shared helpers among cBPF and eBPF. */
+void bpf_user_rnd_init_once(void);
+u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+
 #endif /* _LINUX_BPF_H */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 697ca77..59f4a73 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -30,6 +30,8 @@
 #define PHY_ID_BCM7439_2		0xae025080
 #define PHY_ID_BCM7445			0x600d8510
 
+#define PHY_ID_BCM_CYGNUS		0xae025200
+
 #define PHY_BCM_OUI_MASK		0xfffffc00
 #define PHY_BCM_OUI_1			0x00206000
 #define PHY_BCM_OUI_2			0x0143bc00
@@ -138,7 +140,10 @@
 
 /* 01010: Auto Power-Down */
 #define BCM54XX_SHD_APD			0x0a
+#define  BCM_APD_CLR_MASK		0xFE9F /* clear bits 5, 6 & 8 */
 #define  BCM54XX_SHD_APD_EN		0x0020
+#define  BCM_NO_ANEG_APD_EN		0x0060 /* bits 5 & 6 */
+#define  BCM_APD_SINGLELP_EN	0x0100 /* Bit 8 */
 
 #define BCM5482_SHD_LEDS1	0x0d	/* 01101: LED Selector 1 */
 					/* LED3 / ~LINKSPD[2] selector */
@@ -209,27 +214,13 @@
 #define MII_BRCM_FET_SHDW_AUXSTAT2	0x1b	/* Auxiliary status 2 */
 #define MII_BRCM_FET_SHDW_AS2_APDE	0x0020	/* Auto power down enable */
 
-/*
- * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T
- * 0x1c shadow registers.
- */
-static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow)
-{
-	phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
-	return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
-}
-
-static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow,
-				       u16 val)
-{
-	return phy_write(phydev, MII_BCM54XX_SHD,
-			 MII_BCM54XX_SHD_WRITE |
-			 MII_BCM54XX_SHD_VAL(shadow) |
-			 MII_BCM54XX_SHD_DATA(val));
-}
-
 #define BRCM_CL45VEN_EEE_CONTROL	0x803d
 #define LPI_FEATURE_EN			0x8000
 #define LPI_FEATURE_EN_DIG1000X		0x4000
 
+/* Core register definitions*/
+#define MII_BRCM_CORE_BASE1E	0x1E
+#define MII_BRCM_CORE_EXPB0	0xB0
+#define MII_BRCM_CORE_EXPB1	0xB1
+
 #endif /* _LINUX_BRCMPHY_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index c3a9c8f..735f9f8 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -14,9 +14,10 @@
 #define _CAN_DEV_H
 
 #include <linux/can.h>
-#include <linux/can/netlink.h>
 #include <linux/can/error.h>
 #include <linux/can/led.h>
+#include <linux/can/netlink.h>
+#include <linux/netdevice.h>
 
 /*
  * CAN mode
@@ -77,7 +78,7 @@
 #define get_canfd_dlc(i)	(min_t(__u8, (i), CANFD_MAX_DLC))
 
 /* Drop a given socketbuffer if it does not contain a valid CAN frame. */
-static inline int can_dropped_invalid_skb(struct net_device *dev,
+static inline bool can_dropped_invalid_skb(struct net_device *dev,
 					  struct sk_buff *skb)
 {
 	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
@@ -93,12 +94,12 @@
 	} else
 		goto inval_skb;
 
-	return 0;
+	return false;
 
 inval_skb:
 	kfree_skb(skb);
 	dev->stats.tx_dropped++;
-	return 1;
+	return true;
 }
 
 static inline bool can_is_canfd_skb(const struct sk_buff *skb)
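can_dropped_invalid_skb() keeps its semantics under the int -> bool conversion: it returns true (having already freed the skb and bumped tx_dropped) when the frame is malformed, so drivers bail out early in their xmit path. Typical use, sketched (my_can_xmit is illustrative):

#include <linux/can/dev.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_can_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;	/* skb already freed and counted */

	/* ... hand the frame to the controller ... */
	return NETDEV_TX_OK;
}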
diff --git a/include/linux/can/led.h b/include/linux/can/led.h
index 146de45..2746f7c 100644
--- a/include/linux/can/led.h
+++ b/include/linux/can/led.h
@@ -11,6 +11,7 @@
 
 #include <linux/if.h>
 #include <linux/leds.h>
+#include <linux/netdevice.h>
 
 enum can_led_event {
 	CAN_LED_EVENT_OPEN,
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 4763ad6..f89b31d 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -107,6 +107,7 @@
 	 CEPH_FEATURE_OSDMAP_ENC |		\
 	 CEPH_FEATURE_CRUSH_TUNABLES3 |		\
 	 CEPH_FEATURE_OSD_PRIMARY_AFFINITY |	\
+	 CEPH_FEATURE_MSGR_KEEPALIVE2 |		\
 	 CEPH_FEATURE_CRUSH_V4)
 
 #define CEPH_FEATURES_REQUIRED_DEFAULT   \
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 9ebee53..397c5cd 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -46,6 +46,7 @@
 	unsigned long mount_timeout;		/* jiffies */
 	unsigned long osd_idle_ttl;		/* jiffies */
 	unsigned long osd_keepalive_timeout;	/* jiffies */
+	unsigned long monc_ping_timeout;	/* jiffies */
 
 	/*
 	 * any type that can't be simply compared or doesn't need need
@@ -66,6 +67,7 @@
 #define CEPH_MOUNT_TIMEOUT_DEFAULT	msecs_to_jiffies(60 * 1000)
 #define CEPH_OSD_KEEPALIVE_DEFAULT	msecs_to_jiffies(5 * 1000)
 #define CEPH_OSD_IDLE_TTL_DEFAULT	msecs_to_jiffies(60 * 1000)
+#define CEPH_MONC_PING_TIMEOUT_DEFAULT	msecs_to_jiffies(30 * 1000)
 
 #define CEPH_MSG_MAX_FRONT_LEN	(16*1024*1024)
 #define CEPH_MSG_MAX_MIDDLE_LEN	(16*1024*1024)
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 3775327..b2371d9 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -238,6 +238,8 @@
 	bool out_kvec_is_msg; /* kvec refers to out_msg */
 	int out_more;        /* there is more data after the kvecs */
 	__le64 out_temp_ack; /* for writing an ack */
+	struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
+						     stamp */
 
 	/* message in temps */
 	struct ceph_msg_header in_hdr;
@@ -248,6 +250,8 @@
 	int in_base_pos;     /* bytes read */
 	__le64 in_temp_ack;  /* for reading an ack */
 
+	struct timespec last_keepalive_ack; /* keepalive2 ack stamp */
+
 	struct delayed_work work;	    /* send|recv work */
 	unsigned long       delay;          /* current delay interval */
 };
@@ -285,6 +289,8 @@
 extern void ceph_msg_revoke_incoming(struct ceph_msg *msg);
 
 extern void ceph_con_keepalive(struct ceph_connection *con);
+extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
+				       unsigned long interval);
 
 extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
 				size_t length, size_t alignment);
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 1c18872..0fe2656 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -84,10 +84,12 @@
 #define CEPH_MSGR_TAG_MSG           7  /* message */
 #define CEPH_MSGR_TAG_ACK           8  /* message ack */
 #define CEPH_MSGR_TAG_KEEPALIVE     9  /* just a keepalive byte! */
-#define CEPH_MSGR_TAG_BADPROTOVER  10  /* bad protocol version */
+#define CEPH_MSGR_TAG_BADPROTOVER   10 /* bad protocol version */
 #define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */
 #define CEPH_MSGR_TAG_FEATURES      12 /* insufficient features */
 #define CEPH_MSGR_TAG_SEQ           13 /* 64-bit int follows with seen seq number */
+#define CEPH_MSGR_TAG_KEEPALIVE2    14 /* keepalive2 byte + ceph_timespec */
+#define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */
 
 
 /*
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 4d8fcf2..8492721 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -473,31 +473,8 @@
 	unsigned int depends_on;
 };
 
-extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
-
-/**
- * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
- * @tsk: target task
- *
- * Called from threadgroup_change_begin() and allows cgroup operations to
- * synchronize against threadgroup changes using a percpu_rw_semaphore.
- */
-static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
-{
-	percpu_down_read(&cgroup_threadgroup_rwsem);
-}
-
-/**
- * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
- * @tsk: target task
- *
- * Called from threadgroup_change_end().  Counterpart of
- * cgroup_threadcgroup_change_begin().
- */
-static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
-{
-	percpu_up_read(&cgroup_threadgroup_rwsem);
-}
+void cgroup_threadgroup_change_begin(struct task_struct *tsk);
+void cgroup_threadgroup_change_end(struct task_struct *tsk);
 
 #else	/* CONFIG_CGROUPS */
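Turning cgroup_threadgroup_change_begin()/end() into out-of-line functions means cgroup-defs.h no longer has to expose cgroup_threadgroup_rwsem or drag in the percpu-rwsem machinery; the semaphore can stay private to kernel/cgroup.c. The definitions presumably keep the same shape, roughly (a sketch, not the actual cgroup.c code):

#include <linux/percpu-rwsem.h>
#include <linux/sched.h>

static struct percpu_rw_semaphore cgroup_threadgroup_rwsem;	/* percpu_init_rwsem() at boot */

void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	percpu_down_read(&cgroup_threadgroup_rwsem);
}

void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	percpu_up_read(&cgroup_threadgroup_rwsem);
}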
 
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 31ce435..bdcf358 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -18,15 +18,6 @@
 struct clock_event_device;
 struct module;
 
-/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */
-enum clock_event_mode {
-	CLOCK_EVT_MODE_UNUSED,
-	CLOCK_EVT_MODE_SHUTDOWN,
-	CLOCK_EVT_MODE_PERIODIC,
-	CLOCK_EVT_MODE_ONESHOT,
-	CLOCK_EVT_MODE_RESUME,
-};
-
 /*
  * Possible states of a clock event device.
  *
@@ -86,16 +77,14 @@
  * @min_delta_ns:	minimum delta value in ns
  * @mult:		nanosecond to cycles multiplier
  * @shift:		nanoseconds to cycles divisor (power of two)
- * @mode:		operating mode, relevant only to ->set_mode(), OBSOLETE
  * @state_use_accessors:current state of the device, assigned by the core code
  * @features:		features
  * @retries:		number of forced programming retries
- * @set_mode:		legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
- * @set_state_periodic:	switch state to periodic, if !set_mode
- * @set_state_oneshot:	switch state to oneshot, if !set_mode
- * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode
- * @set_state_shutdown:	switch state to shutdown, if !set_mode
- * @tick_resume:	resume clkevt device, if !set_mode
+ * @set_state_periodic:	switch state to periodic
+ * @set_state_oneshot:	switch state to oneshot
+ * @set_state_oneshot_stopped: switch state to oneshot_stopped
+ * @set_state_shutdown:	switch state to shutdown
+ * @tick_resume:	resume clkevt device
  * @broadcast:		function to broadcast events
  * @min_delta_ticks:	minimum delta value in ticks stored for reconfiguration
  * @max_delta_ticks:	maximum delta value in ticks stored for reconfiguration
@@ -116,18 +105,10 @@
 	u64			min_delta_ns;
 	u32			mult;
 	u32			shift;
-	enum clock_event_mode	mode;
 	enum clock_event_state	state_use_accessors;
 	unsigned int		features;
 	unsigned long		retries;
 
-	/*
-	 * State transition callback(s): Only one of the two groups should be
-	 * defined:
-	 * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
-	 * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume().
-	 */
-	void			(*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
 	int			(*set_state_periodic)(struct clock_event_device *);
 	int			(*set_state_oneshot)(struct clock_event_device *);
 	int			(*set_state_oneshot_stopped)(struct clock_event_device *);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 430efcb..dca22de 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -127,9 +127,14 @@
 #define CPUFREQ_SHARED_TYPE_ANY	 (3) /* Freq can be set from any dependent CPU*/
 
 #ifdef CONFIG_CPU_FREQ
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
 void cpufreq_cpu_put(struct cpufreq_policy *policy);
 #else
+static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+	return NULL;
+}
 static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
 	return NULL;
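
With the !CONFIG_CPU_FREQ stub above, generic code can probe for a policy without wrapping the call in #ifdefs. Unlike cpufreq_cpu_get(), the _raw variant takes no reference, so the result is only valid where the policy cannot disappear; the helper below is a hypothetical illustration.

#include <linux/cpufreq.h>

static unsigned int example_current_khz(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	return policy ? policy->cur : 0;	/* 0: cpufreq inactive on this CPU */
}
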
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 2210254..61d042b 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -202,16 +202,16 @@
 #define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
 #define DCCP_SERVICE_CODE_IS_ABSENT		0
 
-static inline int dccp_list_has_service(const struct dccp_service_list *sl,
+static inline bool dccp_list_has_service(const struct dccp_service_list *sl,
 					const __be32 service)
 {
 	if (likely(sl != NULL)) {
 		u32 i = sl->dccpsl_nr;
 		while (i--)
 			if (sl->dccpsl_list[i] == service)
-				return 1;
+				return true;
 	}
-	return 0;
+	return false;
 }
 
 struct dccp_ackvec;
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index ce447f0..68030e2 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -65,7 +65,10 @@
  *			The "flags" parameter's possible values are
  *			explained above with "DEVFREQ_FLAG_*" macros.
  * @get_dev_status:	The device should provide the current performance
- *			status to devfreq, which is used by governors.
+ *			status to devfreq. Governors are recommended not to
+ *			use this directly; instead, they should use
+ *			devfreq_update_stats() along with
+ *			devfreq.last_status.
  * @get_cur_freq:	The device should provide the current frequency
  *			at which it is operating.
  * @exit:		An optional callback that is called when devfreq
@@ -161,6 +164,7 @@
 	struct delayed_work work;
 
 	unsigned long previous_freq;
+	struct devfreq_dev_status last_status;
 
 	void *data; /* private data for governors */
 
@@ -204,6 +208,19 @@
 extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
 						struct devfreq *devfreq);
 
+/**
+ * devfreq_update_stats() - update the last_status pointer in struct devfreq
+ * @df:		the devfreq instance whose status needs updating
+ *
+ *  Governors are recommended to use this function along with last_status,
+ * which allows other entities to reuse the last_status without affecting
+ * the values fetched later by governors.
+ */
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+	return df->profile->get_dev_status(df->dev.parent, &df->last_status);
+}
+
 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
 /**
  * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
@@ -289,6 +306,11 @@
 							struct devfreq *devfreq)
 {
 }
+
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+	return -EINVAL;
+}
 #endif /* CONFIG_PM_DEVFREQ */
 
 #endif /* __LINUX_DEVFREQ_H__ */
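
A sketch of the calling convention the kernel-doc above describes: a (hypothetical) governor refreshes df->last_status through devfreq_update_stats() and then works on the cached snapshot instead of calling profile->get_dev_status() itself.

#include <linux/devfreq.h>

static int example_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	struct devfreq_dev_status *stat;
	int err = devfreq_update_stats(df);	/* fills df->last_status */

	if (err)
		return err;

	stat = &df->last_status;
	/* crude policy: jump to the top OPP when the device is >50% busy
	 * (assumes the driver filled profile->freq_table, sorted ascending) */
	if (stat->total_time && stat->busy_time * 2 > stat->total_time)
		*freq = df->profile->freq_table[df->profile->max_state - 1];
	else
		*freq = stat->current_frequency;

	return 0;
}
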
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fa2cab9..4165e9a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -13,6 +13,7 @@
 #include <linux/printk.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
+#include <net/sch_generic.h>
 
 #include <asm/cacheflush.h>
 
@@ -302,10 +303,6 @@
 	bpf_size;						\
 })
 
-/* Macro to invoke filter function. */
-#define SK_RUN_FILTER(filter, ctx) \
-	(*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
-
 #ifdef CONFIG_COMPAT
 /* A struct sock_filter is architecture independent. */
 struct compat_sock_fprog {
@@ -326,8 +323,12 @@
 
 struct bpf_prog {
 	u16			pages;		/* Number of allocated pages */
-	bool			jited;		/* Is our filter JIT'ed? */
-	bool			gpl_compatible;	/* Is our filter GPL compatible? */
+	kmemcheck_bitfield_begin(meta);
+	u16			jited:1,	/* Is our filter JIT'ed? */
+				gpl_compatible:1, /* Is filter GPL compatible? */
+				cb_access:1,	/* Is control block accessed? */
+				dst_needed:1;	/* Do we need dst entry? */
+	kmemcheck_bitfield_end(meta);
 	u32			len;		/* Number of filter blocks */
 	enum bpf_prog_type	type;		/* Type of BPF program */
 	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
@@ -349,6 +350,39 @@
 
 #define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)
 
+static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+				       struct sk_buff *skb)
+{
+	u8 *cb_data = qdisc_skb_cb(skb)->data;
+	u8 saved_cb[QDISC_CB_PRIV_LEN];
+	u32 res;
+
+	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
+		     QDISC_CB_PRIV_LEN);
+
+	if (unlikely(prog->cb_access)) {
+		memcpy(saved_cb, cb_data, sizeof(saved_cb));
+		memset(cb_data, 0, sizeof(saved_cb));
+	}
+
+	res = BPF_PROG_RUN(prog, skb);
+
+	if (unlikely(prog->cb_access))
+		memcpy(cb_data, saved_cb, sizeof(saved_cb));
+
+	return res;
+}
+
+static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
+					struct sk_buff *skb)
+{
+	u8 *cb_data = qdisc_skb_cb(skb)->data;
+
+	if (unlikely(prog->cb_access))
+		memset(cb_data, 0, QDISC_CB_PRIV_LEN);
+	return BPF_PROG_RUN(prog, skb);
+}
+
 static inline unsigned int bpf_prog_size(unsigned int proglen)
 {
 	return max(sizeof(struct bpf_prog),
@@ -408,7 +442,7 @@
 
 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
 int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
-			      bpf_aux_classic_check_t trans);
+			      bpf_aux_classic_check_t trans, bool save_orig);
 void bpf_prog_destroy(struct bpf_prog *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
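
With SK_RUN_FILTER removed, a socket-filter style caller (along the lines of sk_filter()) runs the program through bpf_prog_run_save_cb(), which saves and restores the skb's qdisc cb area whenever the program was seen to touch __sk_buff->cb. The function below is an illustrative sketch, not the actual sk_filter() implementation.

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static unsigned int example_run_sk_filter(struct sock *sk, struct sk_buff *skb)
{
	struct sk_filter *filter;
	unsigned int pkt_len = 0;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter)
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
	rcu_read_unlock();

	return pkt_len;	/* 0 would mean "drop", as with sk_filter() */
}
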
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 09460d6..a4c61cb 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -8,7 +8,7 @@
 extern void genl_lock(void);
 extern void genl_unlock(void);
 #ifdef CONFIG_LOCKDEP
-extern int lockdep_genl_is_held(void);
+extern bool lockdep_genl_is_held(void);
 #endif
 
 /* for synchronisation between af_netlink and genetlink */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index cfa906f..dcfb2f4 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -121,7 +121,7 @@
 #define IEEE80211_MAX_SN		IEEE80211_SN_MASK
 #define IEEE80211_SN_MODULO		(IEEE80211_MAX_SN + 1)
 
-static inline int ieee80211_sn_less(u16 sn1, u16 sn2)
+static inline bool ieee80211_sn_less(u16 sn1, u16 sn2)
 {
 	return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1);
 }
@@ -250,7 +250,7 @@
  * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_tods(__le16 fc)
+static inline bool ieee80211_has_tods(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0;
 }
@@ -259,7 +259,7 @@
  * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_fromds(__le16 fc)
+static inline bool ieee80211_has_fromds(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0;
 }
@@ -268,7 +268,7 @@
  * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_a4(__le16 fc)
+static inline bool ieee80211_has_a4(__le16 fc)
 {
 	__le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS);
 	return (fc & tmp) == tmp;
@@ -278,7 +278,7 @@
  * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_morefrags(__le16 fc)
+static inline bool ieee80211_has_morefrags(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0;
 }
@@ -287,7 +287,7 @@
  * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_retry(__le16 fc)
+static inline bool ieee80211_has_retry(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0;
 }
@@ -296,7 +296,7 @@
  * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_pm(__le16 fc)
+static inline bool ieee80211_has_pm(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0;
 }
@@ -305,7 +305,7 @@
  * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_moredata(__le16 fc)
+static inline bool ieee80211_has_moredata(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0;
 }
@@ -314,7 +314,7 @@
  * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_protected(__le16 fc)
+static inline bool ieee80211_has_protected(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0;
 }
@@ -323,7 +323,7 @@
  * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_order(__le16 fc)
+static inline bool ieee80211_has_order(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0;
 }
@@ -332,7 +332,7 @@
  * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_mgmt(__le16 fc)
+static inline bool ieee80211_is_mgmt(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT);
@@ -342,7 +342,7 @@
  * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_ctl(__le16 fc)
+static inline bool ieee80211_is_ctl(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL);
@@ -352,7 +352,7 @@
  * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_data(__le16 fc)
+static inline bool ieee80211_is_data(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_DATA);
@@ -362,7 +362,7 @@
  * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_data_qos(__le16 fc)
+static inline bool ieee80211_is_data_qos(__le16 fc)
 {
 	/*
 	 * mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need
@@ -376,7 +376,7 @@
  * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_data_present(__le16 fc)
+static inline bool ieee80211_is_data_present(__le16 fc)
 {
 	/*
 	 * mask with 0x40 and test that that bit is clear to only return true
@@ -390,7 +390,7 @@
  * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_assoc_req(__le16 fc)
+static inline bool ieee80211_is_assoc_req(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ);
@@ -400,7 +400,7 @@
  * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_assoc_resp(__le16 fc)
+static inline bool ieee80211_is_assoc_resp(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP);
@@ -410,7 +410,7 @@
  * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_reassoc_req(__le16 fc)
+static inline bool ieee80211_is_reassoc_req(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ);
@@ -420,7 +420,7 @@
  * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_reassoc_resp(__le16 fc)
+static inline bool ieee80211_is_reassoc_resp(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP);
@@ -430,7 +430,7 @@
  * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_probe_req(__le16 fc)
+static inline bool ieee80211_is_probe_req(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ);
@@ -440,7 +440,7 @@
  * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_probe_resp(__le16 fc)
+static inline bool ieee80211_is_probe_resp(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
@@ -450,7 +450,7 @@
  * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_beacon(__le16 fc)
+static inline bool ieee80211_is_beacon(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
@@ -460,7 +460,7 @@
  * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_atim(__le16 fc)
+static inline bool ieee80211_is_atim(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM);
@@ -470,7 +470,7 @@
  * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_disassoc(__le16 fc)
+static inline bool ieee80211_is_disassoc(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DISASSOC);
@@ -480,7 +480,7 @@
  * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_auth(__le16 fc)
+static inline bool ieee80211_is_auth(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
@@ -490,7 +490,7 @@
  * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_deauth(__le16 fc)
+static inline bool ieee80211_is_deauth(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH);
@@ -500,7 +500,7 @@
  * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_action(__le16 fc)
+static inline bool ieee80211_is_action(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION);
@@ -510,7 +510,7 @@
  * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_back_req(__le16 fc)
+static inline bool ieee80211_is_back_req(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
@@ -520,7 +520,7 @@
  * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_back(__le16 fc)
+static inline bool ieee80211_is_back(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK);
@@ -530,7 +530,7 @@
  * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_pspoll(__le16 fc)
+static inline bool ieee80211_is_pspoll(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
@@ -540,7 +540,7 @@
  * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_rts(__le16 fc)
+static inline bool ieee80211_is_rts(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
@@ -550,7 +550,7 @@
  * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_cts(__le16 fc)
+static inline bool ieee80211_is_cts(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
@@ -560,7 +560,7 @@
  * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_ack(__le16 fc)
+static inline bool ieee80211_is_ack(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK);
@@ -570,7 +570,7 @@
  * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_cfend(__le16 fc)
+static inline bool ieee80211_is_cfend(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND);
@@ -580,7 +580,7 @@
  * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_cfendack(__le16 fc)
+static inline bool ieee80211_is_cfendack(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK);
@@ -590,7 +590,7 @@
  * ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_nullfunc(__le16 fc)
+static inline bool ieee80211_is_nullfunc(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC);
@@ -600,7 +600,7 @@
  * ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_qos_nullfunc(__le16 fc)
+static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
@@ -624,7 +624,7 @@
  * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
  * @seq_ctrl: frame sequence control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_first_frag(__le16 seq_ctrl)
+static inline bool ieee80211_is_first_frag(__le16 seq_ctrl)
 {
 	return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0;
 }
@@ -1379,6 +1379,7 @@
 
 
 /* block-ack parameters */
+#define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001
 #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
 #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
 #define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
@@ -1745,8 +1746,7 @@
 	WLAN_EID_TIM = 5,
 	WLAN_EID_IBSS_PARAMS = 6,
 	WLAN_EID_COUNTRY = 7,
-	WLAN_EID_HP_PARAMS = 8,
-	WLAN_EID_HP_TABLE = 9,
+	/* 8, 9 reserved */
 	WLAN_EID_REQUEST = 10,
 	WLAN_EID_QBSS_LOAD = 11,
 	WLAN_EID_EDCA_PARAM_SET = 12,
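
The new IEEE80211_ADDBA_PARAM_AMSDU_MASK sits alongside the existing ADDBA parameter masks; hypothetical helpers for parsing the capability field of an ADDBA request might look like this.

#include <linux/ieee80211.h>

static bool example_addba_amsdu_supported(u16 capab)
{
	return !!(capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK);
}

static u8 example_addba_tid(u16 capab)
{
	return (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
}
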
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 1dc1f4e..d3e4156 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -25,12 +25,22 @@
 
 #include <linux/types.h>
 #include <linux/random.h>
-#include <asm/byteorder.h>
 
 #define IEEE802154_MTU			127
 #define IEEE802154_ACK_PSDU_LEN		5
 #define IEEE802154_MIN_PSDU_LEN		9
 #define IEEE802154_FCS_LEN		2
+#define IEEE802154_MAX_AUTH_TAG_LEN	16
+
+/*  General MAC frame format:
+ *  2 bytes: Frame Control
+ *  1 byte:  Sequence Number
+ * 20 bytes: Addressing fields
+ * 14 bytes: Auxiliary Security Header
+ */
+#define IEEE802154_MAX_HEADER_LEN	(2 + 1 + 20 + 14)
+#define IEEE802154_MIN_HEADER_LEN	(IEEE802154_ACK_PSDU_LEN - \
+					 IEEE802154_FCS_LEN)
 
 #define IEEE802154_PAN_ID_BROADCAST	0xffff
 #define IEEE802154_ADDR_SHORT_BROADCAST	0xffff
@@ -205,6 +215,41 @@
 	IEEE802154_SCAN_IN_PROGRESS = 0xfc,
 };
 
+/* frame control handling */
+#define IEEE802154_FCTL_FTYPE		0x0003
+#define IEEE802154_FCTL_ACKREQ		0x0020
+#define IEEE802154_FCTL_INTRA_PAN	0x0040
+
+#define IEEE802154_FTYPE_DATA		0x0001
+
+/*
+ * ieee802154_is_data - check if type is IEEE802154_FTYPE_DATA
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee802154_is_data(__le16 fc)
+{
+	return (fc & cpu_to_le16(IEEE802154_FCTL_FTYPE)) ==
+		cpu_to_le16(IEEE802154_FTYPE_DATA);
+}
+
+/**
+ * ieee802154_is_ackreq - check if acknowledgment request bit is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee802154_is_ackreq(__le16 fc)
+{
+	return fc & cpu_to_le16(IEEE802154_FCTL_ACKREQ);
+}
+
+/**
+ * ieee802154_is_intra_pan - check if the frame is an intra-PAN communication
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee802154_is_intra_pan(__le16 fc)
+{
+	return fc & cpu_to_le16(IEEE802154_FCTL_INTRA_PAN);
+}
+
 /**
  * ieee802154_is_valid_psdu_len - check if psdu len is valid
  * available lengths:
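
A short sketch of the new 802.15.4 frame-control helpers in use: an illustrative MAC driver deciding whether a received frame requires an acknowledgment, with fc taken from the first two PSDU bytes in little-endian order as the helpers expect.

#include <linux/ieee802154.h>

static bool example_needs_ack(__le16 fc)
{
	return ieee802154_is_data(fc) && ieee802154_is_ackreq(fc);
}
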
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index dad8b00..a338a68 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -46,6 +46,12 @@
 #define BR_LEARNING_SYNC	BIT(9)
 #define BR_PROXYARP_WIFI	BIT(10)
 
+/* values as per ieee8021QBridgeFdbAgingTime */
+#define BR_MIN_AGEING_TIME	(10 * HZ)
+#define BR_MAX_AGEING_TIME	(1000000 * HZ)
+
+#define BR_DEFAULT_AGEING_TIME	(300 * HZ)
+
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
 typedef int br_should_route_hook_t(struct sk_buff *skb);
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 9084292..9c9de11 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -110,7 +110,7 @@
 #define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value)
 #define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value)
 
-extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u16 proto);
+extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
 extern int igmp_rcv(struct sk_buff *);
 extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
 extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index a4328ce..ee971f3 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -171,7 +171,7 @@
 			 __be32 local, int scope);
 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
 				    __be32 mask);
-static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
+static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
 {
 	return !((addr^ifa->ifa_address)&ifa->ifa_mask);
 }
@@ -180,15 +180,15 @@
  *	Check if a mask is acceptable.
  */
  
-static __inline__ int bad_mask(__be32 mask, __be32 addr)
+static __inline__ bool bad_mask(__be32 mask, __be32 addr)
 {
 	__u32 hmask;
 	if (addr & (mask = ~mask))
-		return 1;
+		return true;
 	hmask = ntohl(mask);
 	if (hmask & (hmask+1))
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
 #define for_primary_ifa(in_dev)	{ struct in_ifaddr *ifa; \
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index d0b380e..e38681f 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -25,6 +25,13 @@
 extern struct files_struct init_files;
 extern struct fs_struct init_fs;
 
+#ifdef CONFIG_CGROUPS
+#define INIT_GROUP_RWSEM(sig)						\
+	.group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
+#else
+#define INIT_GROUP_RWSEM(sig)
+#endif
+
 #ifdef CONFIG_CPUSETS
 #define INIT_CPUSET_SEQ(tsk)							\
 	.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -57,6 +64,7 @@
 	INIT_PREV_CPUTIME(sig)						\
 	.cred_guard_mutex =						\
 		 __MUTEX_INITIALIZER(sig.cred_guard_mutex),		\
+	INIT_GROUP_RWSEM(sig)						\
 }
 
 extern struct nsproxy init_nsproxy;
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 3920a19..92f7177 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -68,8 +68,8 @@
 	return iova >> iova_shift(iovad);
 }
 
-int iommu_iova_cache_init(void);
-void iommu_iova_cache_destroy(void);
+int iova_cache_get(void);
+void iova_cache_put(void);
 
 struct iova *alloc_iova_mem(void);
 void free_iova_mem(struct iova *iova);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index f1f32af..0ef2a97 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -264,9 +264,9 @@
 };
 
 #if IS_ENABLED(CONFIG_IPV6)
-static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
+static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
 {
-	return inet_sk(__sk)->pinet6;
+	return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL;
 }
 
 static inline struct raw6_sock *raw6_sk(const struct sock *sk)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 6f8b340..11bf092 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -110,8 +110,8 @@
 /*
  * Return value for chip->irq_set_affinity()
  *
- * IRQ_SET_MASK_OK	- OK, core updates irq_data.affinity
- * IRQ_SET_MASK_NOCPY	- OK, chip did update irq_data.affinity
+ * IRQ_SET_MASK_OK	- OK, core updates irq_common_data.affinity
+ * IRQ_SET_MASK_NOCPY	- OK, chip did update irq_common_data.affinity
  * IRQ_SET_MASK_OK_DONE	- Same as IRQ_SET_MASK_OK for core. Special code to
  *			  support stacked irqchips, which indicates skipping
  *			  all descendent irqchips.
@@ -129,9 +129,19 @@
  * struct irq_common_data - per irq data shared by all irqchips
  * @state_use_accessors: status information for irq chip functions.
  *			Use accessor functions to deal with it
+ * @node:		node index useful for balancing
+ * @handler_data:	per-IRQ data for the irq_chip methods
+ * @affinity:		IRQ affinity on SMP
+ * @msi_desc:		MSI descriptor
  */
 struct irq_common_data {
 	unsigned int		state_use_accessors;
+#ifdef CONFIG_NUMA
+	unsigned int		node;
+#endif
+	void			*handler_data;
+	struct msi_desc		*msi_desc;
+	cpumask_var_t		affinity;
 };
 
 /**
@@ -139,38 +149,26 @@
  * @mask:		precomputed bitmask for accessing the chip registers
  * @irq:		interrupt number
  * @hwirq:		hardware interrupt number, local to the interrupt domain
- * @node:		node index useful for balancing
  * @common:		point to data shared by all irqchips
  * @chip:		low level interrupt hardware access
  * @domain:		Interrupt translation domain; responsible for mapping
  *			between hwirq number and linux irq number.
  * @parent_data:	pointer to parent struct irq_data to support hierarchy
  *			irq_domain
- * @handler_data:	per-IRQ data for the irq_chip methods
  * @chip_data:		platform-specific per-chip private data for the chip
  *			methods, to allow shared chip implementations
- * @msi_desc:		MSI descriptor
- * @affinity:		IRQ affinity on SMP
- *
- * The fields here need to overlay the ones in irq_desc until we
- * cleaned up the direct references and switched everything over to
- * irq_data.
  */
 struct irq_data {
 	u32			mask;
 	unsigned int		irq;
 	unsigned long		hwirq;
-	unsigned int		node;
 	struct irq_common_data	*common;
 	struct irq_chip		*chip;
 	struct irq_domain	*domain;
 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
 	struct irq_data		*parent_data;
 #endif
-	void			*handler_data;
 	void			*chip_data;
-	struct msi_desc		*msi_desc;
-	cpumask_var_t		affinity;
 };
 
 /*
@@ -190,6 +188,7 @@
  * IRQD_IRQ_MASKED		- Masked state of the interrupt
  * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
  * IRQD_WAKEUP_ARMED		- Wakeup mode armed
+ * IRQD_FORWARDED_TO_VCPU	- The interrupt is forwarded to a VCPU
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
@@ -204,6 +203,7 @@
 	IRQD_IRQ_MASKED			= (1 << 17),
 	IRQD_IRQ_INPROGRESS		= (1 << 18),
 	IRQD_WAKEUP_ARMED		= (1 << 19),
+	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
 };
 
 #define __irqd_to_state(d)		((d)->common->state_use_accessors)
@@ -282,6 +282,20 @@
 	return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
 }
 
+static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
+}
+
+static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
+{
+	__irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
+}
+
+static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
+{
+	__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
+}
 
 /*
  * Functions for chained handlers which can be enabled/disabled by the
@@ -461,14 +475,14 @@
  * Built-in IRQ handlers for various IRQ types,
  * callable via desc->handle_irq()
  */
-extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_level_irq(struct irq_desc *desc);
+extern void handle_fasteoi_irq(struct irq_desc *desc);
+extern void handle_edge_irq(struct irq_desc *desc);
+extern void handle_edge_eoi_irq(struct irq_desc *desc);
+extern void handle_simple_irq(struct irq_desc *desc);
+extern void handle_percpu_irq(struct irq_desc *desc);
+extern void handle_percpu_devid_irq(struct irq_desc *desc);
+extern void handle_bad_irq(struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
 extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
@@ -627,23 +641,23 @@
 static inline void *irq_get_handler_data(unsigned int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
-	return d ? d->handler_data : NULL;
+	return d ? d->common->handler_data : NULL;
 }
 
 static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
 {
-	return d->handler_data;
+	return d->common->handler_data;
 }
 
 static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
-	return d ? d->msi_desc : NULL;
+	return d ? d->common->msi_desc : NULL;
 }
 
 static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
 {
-	return d->msi_desc;
+	return d->common->msi_desc;
 }
 
 static inline u32 irq_get_trigger_type(unsigned int irq)
@@ -652,21 +666,30 @@
 	return d ? irqd_get_trigger_type(d) : 0;
 }
 
+static inline int irq_common_data_get_node(struct irq_common_data *d)
+{
+#ifdef CONFIG_NUMA
+	return d->node;
+#else
+	return 0;
+#endif
+}
+
 static inline int irq_data_get_node(struct irq_data *d)
 {
-	return d->node;
+	return irq_common_data_get_node(d->common);
 }
 
 static inline struct cpumask *irq_get_affinity_mask(int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
 
-	return d ? d->affinity : NULL;
+	return d ? d->common->affinity : NULL;
 }
 
 static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
 {
-	return d->affinity;
+	return d->common->affinity;
 }
 
 unsigned int arch_dynirq_lower_bound(unsigned int from);
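
Since handler_data, msi_desc, node and affinity now live in irq_common_data, chip and driver code is expected to go through the accessors rather than dereferencing struct irq_data fields. A hypothetical debug helper:

#include <linux/irq.h>
#include <linux/cpumask.h>

static void example_dump_irq(struct irq_data *d)
{
	const struct cpumask *aff = irq_data_get_affinity_mask(d);

	pr_info("irq %u (hwirq %lu) node %d affinity %*pbl\n",
		d->irq, d->hwirq, irq_data_get_node(d),
		cpumask_pr_args(aff));
}
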
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 5acfa26..a587a33 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -98,11 +98,7 @@
 
 static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
 {
-#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-	return irq_to_desc(data->irq);
-#else
-	return container_of(data, struct irq_desc, irq_data);
-#endif
+	return container_of(data->common, struct irq_desc, irq_common_data);
 }
 
 static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
@@ -127,23 +123,21 @@
 
 static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
 {
-	return desc->irq_data.handler_data;
+	return desc->irq_common_data.handler_data;
 }
 
 static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
 {
-	return desc->irq_data.msi_desc;
+	return desc->irq_common_data.msi_desc;
 }
 
 /*
  * Architectures call this to let the generic IRQ layer
- * handle an interrupt. If the descriptor is attached to an
- * irqchip-style controller then we call the ->handle_irq() handler,
- * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
+ * handle an interrupt.
  */
-static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
+static inline void generic_handle_irq_desc(struct irq_desc *desc)
 {
-	desc->handle_irq(irq, desc);
+	desc->handle_irq(desc);
 }
 
 int generic_handle_irq(unsigned int irq);
@@ -176,29 +170,6 @@
 	return irq_desc_has_action(irq_to_desc(irq));
 }
 
-/* caller has locked the irq_desc and both params are valid */
-static inline void __irq_set_handler_locked(unsigned int irq,
-					    irq_flow_handler_t handler)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	desc->handle_irq = handler;
-}
-
-/* caller has locked the irq_desc and both params are valid */
-static inline void
-__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
-				   irq_flow_handler_t handler, const char *name)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	irq_desc_get_irq_data(desc)->chip = chip;
-	desc->handle_irq = handler;
-	desc->name = name;
-}
-
 /**
  * irq_set_handler_locked - Set irq handler from a locked region
  * @data:	Pointer to the irq_data structure which identifies the irq
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
index 62d5430..661bed0 100644
--- a/include/linux/irqhandler.h
+++ b/include/linux/irqhandler.h
@@ -8,7 +8,7 @@
 
 struct irq_desc;
 struct irq_data;
-typedef	void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
+typedef	void (*irq_flow_handler_t)(struct irq_desc *desc);
 typedef	void (*irq_preflow_handler_t)(struct irq_data *data);
 
 #endif
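
A flow handler written against the new single-argument prototype; the irq number is no longer passed in, so handlers that need it derive it from the descriptor. The demux handler below is a hedged sketch of the usual chained-handler pattern.

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>

static void example_demux_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int irq = irq_desc_get_irq(desc);

	chained_irq_enter(chip, desc);
	pr_debug("demuxing parent irq %u\n", irq);
	/* ... read the hardware status and call generic_handle_irq()
	 * for each pending child interrupt ... */
	chained_irq_exit(chip, desc);
}
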
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 7f653e8..f109423 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -21,8 +21,8 @@
  *
  * DEFINE_STATIC_KEY_TRUE(key);
  * DEFINE_STATIC_KEY_FALSE(key);
- * static_key_likely()
- * statick_key_unlikely()
+ * static_branch_likely()
+ * static_branch_unlikely()
  *
  * Jump labels provide an interface to generate dynamic branches using
  * self-modifying code. Assuming toolchain and architecture support, if we
@@ -45,12 +45,10 @@
  * statement, setting the key to true requires us to patch in a jump
  * to the out-of-line of true branch.
  *
- * In addtion to static_branch_{enable,disable}, we can also reference count
+ * In addition to static_branch_{enable,disable}, we can also reference count
  * the key or branch direction via static_branch_{inc,dec}. Thus,
  * static_branch_inc() can be thought of as a 'make more true' and
- * static_branch_dec() as a 'make more false'. The inc()/dec()
- * interface is meant to be used exclusively from the inc()/dec() for a given
- * key.
+ * static_branch_dec() as a 'make more false'.
  *
  * Since this relies on modifying code, the branch modifying functions
  * must be considered absolute slow paths (machine wide synchronization etc.).
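
A minimal example of the API the corrected comment refers to, using a default-false key to guard a rarely enabled path; the names are illustrative.

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_dbg_key);

static inline void example_hot_path(void)
{
	/* compiles to a patched-out branch until the key is enabled */
	if (static_branch_unlikely(&example_dbg_key))
		pr_info("debug instrumentation enabled\n");
}

static void example_enable_debug(void)
{
	static_branch_enable(&example_dbg_key);	/* or static_branch_inc() */
}
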
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ad800e6..6452ff4 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -242,7 +242,6 @@
 	 * percpu counter.
 	 */
 	struct mem_cgroup_stat_cpu __percpu *stat;
-	spinlock_t pcp_counter_lock;
 
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
 	struct cg_proto tcp_mem;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8eb3b19..2a0b956 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -402,17 +402,6 @@
 	u8			rsvd[8];
 };
 
-struct mlx5_cmd_query_special_contexts_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_cmd_query_special_contexts_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32                  dump_fill_mkey;
-	__be32                  resd_lkey;
-};
-
 struct mlx5_cmd_layout {
 	u8		type;
 	u8		rsvd0[3];
@@ -440,7 +429,7 @@
 	__be32		rsvd2;
 	u8		irisc_index;
 	u8		synd;
-	__be16		ext_sync;
+	__be16		ext_synd;
 };
 
 struct mlx5_init_seg {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 27b53f9..41a3287 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -391,9 +391,10 @@
 	struct health_buffer __iomem   *health;
 	__be32 __iomem		       *health_counter;
 	struct timer_list		timer;
-	struct list_head		list;
 	u32				prev;
 	int				miss_counter;
+	struct workqueue_struct	       *wq;
+	struct work_struct		work;
 };
 
 struct mlx5_cq_table {
@@ -676,8 +677,8 @@
 int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
 int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
-void mlx5_health_cleanup(void);
-void  __init mlx5_health_init(void);
+void mlx5_health_cleanup(struct mlx5_core_dev *dev);
+int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
@@ -731,7 +732,7 @@
 #endif
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
 void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 		       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
@@ -845,7 +846,6 @@
 int mlx5_register_interface(struct mlx5_interface *intf);
 void mlx5_unregister_interface(struct mlx5_interface *intf);
 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
-int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey);
 
 struct mlx5_profile {
 	u64	mask;
@@ -866,4 +866,8 @@
 	return 8 * (1 << param);
 }
 
+enum {
+	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
+};
+
 #endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fda728e..80001de 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -20,6 +20,7 @@
 #include <linux/shrinker.h>
 #include <linux/resource.h>
 #include <linux/page_ext.h>
+#include <linux/err.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -904,6 +905,27 @@
 #endif
 }
 
+#ifdef CONFIG_MEMCG
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+	return page->mem_cgroup;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+	page->mem_cgroup = memcg;
+}
+#else
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+	return NULL;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+}
+#endif
+
 /*
  * Some inline functions in vmstat.h depend on page_zone()
  */
@@ -1214,6 +1236,49 @@
 		    int write, int force, struct page **pages);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
+
+/* Container for pinned pfns / pages */
+struct frame_vector {
+	unsigned int nr_allocated;	/* Number of frames we have space for */
+	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
+	bool got_ref;		/* Did we pin pages by getting page ref? */
+	bool is_pfns;		/* Does array contain pages or pfns? */
+	void *ptrs[0];		/* Array of pinned pfns / pages. Use
+				 * frame_vector_pages() or frame_vector_pfns()
+				 * for access */
+};
+
+struct frame_vector *frame_vector_create(unsigned int nr_frames);
+void frame_vector_destroy(struct frame_vector *vec);
+int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
+		     bool write, bool force, struct frame_vector *vec);
+void put_vaddr_frames(struct frame_vector *vec);
+int frame_vector_to_pages(struct frame_vector *vec);
+void frame_vector_to_pfns(struct frame_vector *vec);
+
+static inline unsigned int frame_vector_count(struct frame_vector *vec)
+{
+	return vec->nr_frames;
+}
+
+static inline struct page **frame_vector_pages(struct frame_vector *vec)
+{
+	if (vec->is_pfns) {
+		int err = frame_vector_to_pages(vec);
+
+		if (err)
+			return ERR_PTR(err);
+	}
+	return (struct page **)(vec->ptrs);
+}
+
+static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
+{
+	if (!vec->is_pfns)
+		frame_vector_to_pfns(vec);
+	return (unsigned long *)(vec->ptrs);
+}
+
 struct kvec;
 int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
 			struct page **pages);
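
The intended calling sequence for the new frame_vector API, as a hedged sketch: create a vector, pin the user range, convert to struct page pointers, and release in reverse order. Buffer address, size and error handling are illustrative only.

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/errno.h>

static int example_pin_user_buffer(unsigned long uaddr, unsigned int nr_pages)
{
	struct frame_vector *vec;
	struct page **pages;
	int ret;

	vec = frame_vector_create(nr_pages);
	if (!vec)
		return -ENOMEM;

	ret = get_vaddr_frames(uaddr, nr_pages, true /* write */,
			       false /* force */, vec);
	if (ret < 0)
		goto out_destroy;

	pages = frame_vector_pages(vec);	/* ERR_PTR if the range is pfn-mapped */
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put;
	}

	/* ... use pages[0 .. frame_vector_count(vec) - 1] ... */
	ret = 0;
out_put:
	put_vaddr_frames(vec);
out_destroy:
	frame_vector_destroy(vec);
	return ret;
}
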
diff --git a/include/linux/net.h b/include/linux/net.h
index 049d4b0..70ac5e2 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -24,7 +24,8 @@
 #include <linux/fcntl.h>	/* For O_CLOEXEC and O_NONBLOCK */
 #include <linux/kmemcheck.h>
 #include <linux/rcupdate.h>
-#include <linux/jump_label.h>
+#include <linux/once.h>
+
 #include <uapi/linux/net.h>
 
 struct poll_table_struct;
@@ -250,22 +251,8 @@
 	} while (0)
 #endif
 
-bool __net_get_random_once(void *buf, int nbytes, bool *done,
-			   struct static_key *done_key);
-
-#define net_get_random_once(buf, nbytes)				\
-	({								\
-		bool ___ret = false;					\
-		static bool ___done = false;				\
-		static struct static_key ___once_key =			\
-			STATIC_KEY_INIT_TRUE;				\
-		if (static_key_true(&___once_key))			\
-			___ret = __net_get_random_once(buf,		\
-						       nbytes,		\
-						       &___done,	\
-						       &___once_key);	\
-		___ret;							\
-	})
+#define net_get_random_once(buf, nbytes)			\
+	get_random_once((buf), (nbytes))
 
 int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
 		   size_t num, size_t len);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b791405..b337440 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -507,6 +507,7 @@
 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
 	smp_mb__before_atomic();
 	clear_bit(NAPI_STATE_SCHED, &n->state);
+	clear_bit(NAPI_STATE_NPSVC, &n->state);
 }
 
 #ifdef CONFIG_SMP
@@ -1257,9 +1258,10 @@
  * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
  *	change when it's running
  * @IFF_MACVLAN: Macvlan device
- * @IFF_VRF_MASTER: device is a VRF master
+ * @IFF_L3MDEV_MASTER: device is an L3 master device
  * @IFF_NO_QUEUE: device can run without qdisc attached
  * @IFF_OPENVSWITCH: device is a Open vSwitch master
+ * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
  */
 enum netdev_priv_flags {
 	IFF_802_1Q_VLAN			= 1<<0,
@@ -1282,9 +1284,10 @@
 	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
 	IFF_IPVLAN_MASTER		= 1<<18,
 	IFF_IPVLAN_SLAVE		= 1<<19,
-	IFF_VRF_MASTER			= 1<<20,
+	IFF_L3MDEV_MASTER		= 1<<20,
 	IFF_NO_QUEUE			= 1<<21,
 	IFF_OPENVSWITCH			= 1<<22,
+	IFF_L3MDEV_SLAVE		= 1<<23,
 };
 
 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
@@ -1307,7 +1310,7 @@
 #define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
 #define IFF_IPVLAN_MASTER		IFF_IPVLAN_MASTER
 #define IFF_IPVLAN_SLAVE		IFF_IPVLAN_SLAVE
-#define IFF_VRF_MASTER			IFF_VRF_MASTER
+#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
 #define IFF_NO_QUEUE			IFF_NO_QUEUE
 #define IFF_OPENVSWITCH			IFF_OPENVSWITCH
 
@@ -1426,7 +1429,6 @@
  *	@dn_ptr:	DECnet specific data
  *	@ip6_ptr:	IPv6 specific data
  *	@ax25_ptr:	AX.25 specific data
- *	@vrf_ptr:	VRF specific data
  *	@ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
  *
  *	@last_rx:	Time of last Rx
@@ -1586,6 +1588,9 @@
 #ifdef CONFIG_NET_SWITCHDEV
 	const struct switchdev_ops *switchdev_ops;
 #endif
+#ifdef CONFIG_NET_L3_MASTER_DEV
+	const struct l3mdev_ops	*l3mdev_ops;
+#endif
 
 	const struct header_ops *header_ops;
 
@@ -1645,7 +1650,6 @@
 	struct dn_dev __rcu     *dn_ptr;
 	struct inet6_dev __rcu	*ip6_ptr;
 	void			*ax25_ptr;
-	struct net_vrf_dev __rcu *vrf_ptr;
 	struct wireless_dev	*ieee80211_ptr;
 	struct wpan_dev		*ieee802154_ptr;
 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
@@ -3823,9 +3827,14 @@
 	return dev->priv_flags & IFF_SUPP_NOFCS;
 }
 
-static inline bool netif_is_vrf(const struct net_device *dev)
+static inline bool netif_is_l3_master(const struct net_device *dev)
 {
-	return dev->priv_flags & IFF_VRF_MASTER;
+	return dev->priv_flags & IFF_L3MDEV_MASTER;
+}
+
+static inline bool netif_is_l3_slave(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_L3MDEV_SLAVE;
 }
 
 static inline bool netif_is_bridge_master(const struct net_device *dev)
@@ -3838,27 +3847,6 @@
 	return dev->priv_flags & IFF_OPENVSWITCH;
 }
 
-static inline bool netif_index_is_vrf(struct net *net, int ifindex)
-{
-	bool rc = false;
-
-#if IS_ENABLED(CONFIG_NET_VRF)
-	struct net_device *dev;
-
-	if (ifindex == 0)
-		return false;
-
-	rcu_read_lock();
-
-	dev = dev_get_by_index_rcu(net, ifindex);
-	if (dev)
-		rc = netif_is_vrf(dev);
-
-	rcu_read_unlock();
-#endif
-	return rc;
-}
-
 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
 static inline void netif_keep_dst(struct net_device *dev)
 {
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 0b4d456..165ab2d 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -80,7 +80,7 @@
 	p->okfn = okfn;
 }
 
-typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
+typedef unsigned int nf_hookfn(void *priv,
 			       struct sk_buff *skb,
 			       const struct nf_hook_state *state);
 
@@ -283,7 +283,7 @@
 				 struct flowi *fl, bool strict);
 	void		(*saveroute)(const struct sk_buff *skb,
 				     struct nf_queue_entry *entry);
-	int		(*reroute)(struct sk_buff *skb,
+	int		(*reroute)(struct net *net, struct sk_buff *skb,
 				   const struct nf_queue_entry *entry);
 	int		route_key_size;
 };
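
A hook function written against the new nf_hookfn prototype, where the first argument is the hook's private data rather than the nf_hook_ops; the configuration structure and limit are made up for illustration.

#include <linux/netfilter.h>
#include <linux/skbuff.h>

struct example_cfg {
	unsigned int max_len;
};

static unsigned int example_hook(void *priv, struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	const struct example_cfg *cfg = priv;	/* supplied at registration time */

	return (cfg && skb->len > cfg->max_len) ? NF_DROP : NF_ACCEPT;
}
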
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index e955d47..249d1bb 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -45,11 +45,11 @@
 void nfnl_lock(__u8 subsys_id);
 void nfnl_unlock(__u8 subsys_id);
 #ifdef CONFIG_PROVE_LOCKING
-int lockdep_nfnl_is_held(__u8 subsys_id);
+bool lockdep_nfnl_is_held(__u8 subsys_id);
 #else
-static inline int lockdep_nfnl_is_held(__u8 subsys_id)
+static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
 {
-	return 1;
+	return true;
 }
 #endif /* CONFIG_PROVE_LOCKING */
 
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index b006b71..c557741 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -13,6 +13,7 @@
  * @target:	the target extension
  * @matchinfo:	per-match data
  * @targetinfo:	per-target data
+ * @net:	network namespace through which the action was invoked
  * @in:		input netdevice
  * @out:	output netdevice
  * @fragoff:	packet is a fragment, this is the data offset
@@ -24,7 +25,6 @@
  * Fields written to by extensions:
  *
  * @hotdrop:	drop packet if we had inspection problems
- * Network namespace obtainable using dev_net(in/out)
  */
 struct xt_action_param {
 	union {
@@ -34,6 +34,7 @@
 	union {
 		const void *matchinfo, *targinfo;
 	};
+	struct net *net;
 	const struct net_device *in, *out;
 	int fragoff;
 	unsigned int thoff;
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index c22a7fb..6f074db 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -53,7 +53,6 @@
 					    const struct arpt_replace *repl);
 extern void arpt_unregister_table(struct xt_table *table);
 extern unsigned int arpt_do_table(struct sk_buff *skb,
-				  unsigned int hook,
 				  const struct nf_hook_state *state,
 				  struct xt_table *table);
 
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 8ca6d64..2ea517c 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -111,9 +111,9 @@
 extern struct ebt_table *ebt_register_table(struct net *net,
 					    const struct ebt_table *table);
 extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
-extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb,
-   const struct net_device *in, const struct net_device *out,
-   struct ebt_table *table);
+extern unsigned int ebt_do_table(struct sk_buff *skb,
+				 const struct nf_hook_state *state,
+				 struct ebt_table *table);
 
 /* Used in the kernel match() functions */
 #define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg))
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 6e4591b..98c03b2 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -6,7 +6,7 @@
 
 #include <uapi/linux/netfilter_ipv4.h>
 
-int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
+int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
 __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 		       unsigned int dataoff, u_int8_t protocol);
 #endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 4073510..aa598f9 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -64,7 +64,6 @@
 
 extern void *ipt_alloc_initial_table(const struct xt_table *);
 extern unsigned int ipt_do_table(struct sk_buff *skb,
-				 unsigned int hook,
 				 const struct nf_hook_state *state,
 				 struct xt_table *table);
 
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 7715746..47c6b04 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -17,12 +17,12 @@
 	int (*chk_addr)(struct net *net, const struct in6_addr *addr,
 			const struct net_device *dev, int strict);
 	void (*route_input)(struct sk_buff *skb);
-	int (*fragment)(struct sock *sk, struct sk_buff *skb,
-			int (*output)(struct sock *, struct sk_buff *));
+	int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
+			int (*output)(struct net *, struct sock *, struct sk_buff *));
 };
 
 #ifdef CONFIG_NETFILTER
-int ip6_route_me_harder(struct sk_buff *skb);
+int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
 __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
 			unsigned int dataoff, u_int8_t protocol);
 
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index b40d2b6..0f76e5c 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -30,7 +30,6 @@
 					    const struct ip6t_replace *repl);
 extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
 extern unsigned int ip6t_do_table(struct sk_buff *skb,
-				  unsigned int hook,
 				  const struct nf_hook_state *state,
 				  struct xt_table *table);
 
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index b02f72b..f798e2a 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -522,10 +522,9 @@
  * @speed:	OUT - The link speed expressed as PCIe generation number.
  * @width:	OUT - The link width expressed as the number of PCIe lanes.
  *
- * Set the translation of a memory window.  The peer may access local memory
- * through the window starting at the address, up to the size.  The address
- * must be aligned to the alignment specified by ntb_mw_get_range().  The size
- * must be aligned to the size alignment specified by ntb_mw_get_range().
+ * Get the current state of the ntb link.  It is recommended to query the link
+ * state once after every link event.  It is safe to query the link state in
+ * the context of the link event callback.
  *
  * Return: One if the link is up, zero if the link is down, otherwise a
  *		negative value indicating the error number.
@@ -795,7 +794,7 @@
 }
 
 /**
- * ntb_peer_db_clear() - clear bits in the local doorbell register
+ * ntb_peer_db_clear() - clear bits in the peer doorbell register
  * @ntb:	NTB device context.
  * @db_bits:	Doorbell bits to clear.
  *
diff --git a/include/linux/ntb_transport.h b/include/linux/ntb_transport.h
index 2862861..7243eb9 100644
--- a/include/linux/ntb_transport.h
+++ b/include/linux/ntb_transport.h
@@ -83,3 +83,4 @@
 void ntb_transport_link_up(struct ntb_transport_qp *qp);
 void ntb_transport_link_down(struct ntb_transport_qp *qp);
 bool ntb_transport_link_query(struct ntb_transport_qp *qp);
+unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp);
diff --git a/include/linux/once.h b/include/linux/once.h
new file mode 100644
index 0000000..285f12c
--- /dev/null
+++ b/include/linux/once.h
@@ -0,0 +1,57 @@
+#ifndef _LINUX_ONCE_H
+#define _LINUX_ONCE_H
+
+#include <linux/types.h>
+#include <linux/jump_label.h>
+
+bool __do_once_start(bool *done, unsigned long *flags);
+void __do_once_done(bool *done, struct static_key *once_key,
+		    unsigned long *flags);
+
+/* Call a function exactly once. The idea of DO_ONCE() is to perform
+ * a function call such as initialization of random seeds, etc, only
+ * once, where DO_ONCE() can live in the fast-path. After @func has
+ * been called with the passed arguments, the static key will patch
+ * out the condition into a nop. DO_ONCE() guarantees type safety of
+ * arguments!
+ *
+ * Note that the following is not equivalent ...
+ *
+ *   DO_ONCE(func, arg);
+ *   DO_ONCE(func, arg);
+ *
+ * ... to this version:
+ *
+ *   void foo(void)
+ *   {
+ *     DO_ONCE(func, arg);
+ *   }
+ *
+ *   foo();
+ *   foo();
+ *
+ * In case the one-time invocation could be triggered from multiple
+ * places, then a common helper function must be defined, so that only
+ * a single static key will be placed there!
+ */
+#define DO_ONCE(func, ...)						     \
+	({								     \
+		bool ___ret = false;					     \
+		static bool ___done = false;				     \
+		static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \
+		if (static_key_true(&___once_key)) {			     \
+			unsigned long ___flags;				     \
+			___ret = __do_once_start(&___done, &___flags);	     \
+			if (unlikely(___ret)) {				     \
+				func(__VA_ARGS__);			     \
+				__do_once_done(&___done, &___once_key,	     \
+					       &___flags);		     \
+			}						     \
+		}							     \
+		___ret;							     \
+	})
+
+#define get_random_once(buf, nbytes)					     \
+	DO_ONCE(get_random_bytes, (buf), (nbytes))
+
+#endif /* _LINUX_ONCE_H */
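As an illustration only (this block is not part of the diff): a minimal sketch of the intended DO_ONCE()/get_random_once() usage on a fast path. The foo_* names and the jhash-based salt are assumptions, not taken from the patch.

#include <linux/jhash.h>
#include <linux/once.h>

static u32 foo_salt;

static u32 foo_hash(u32 value)
{
	/* Seeded at most once; afterwards the static key patches the
	 * whole DO_ONCE() condition out of the fast path.
	 */
	get_random_once(&foo_salt, sizeof(foo_salt));

	return jhash_1word(value, foo_salt);
}

All call sites of foo_hash() then share a single seeding, which is exactly the single-static-key property the comment above asks for when the one-time invocation can be reached from several places.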
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 962387a..4c477e6 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -19,6 +19,7 @@
 #include <linux/spinlock.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
+#include <linux/module.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/mod_devicetable.h>
@@ -153,6 +154,7 @@
  * PHYs should register using this structure
  */
 struct mii_bus {
+	struct module *owner;
 	const char *name;
 	char id[MII_BUS_ID_SIZE];
 	void *priv;
@@ -198,7 +200,8 @@
 	return mdiobus_alloc_size(0);
 }
 
-int mdiobus_register(struct mii_bus *bus);
+int __mdiobus_register(struct mii_bus *bus, struct module *owner);
+#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
 void mdiobus_unregister(struct mii_bus *bus);
 void mdiobus_free(struct mii_bus *bus);
 struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
@@ -742,6 +745,7 @@
 				     struct phy_c45_device_ids *c45_ids);
 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
 int phy_device_register(struct phy_device *phy);
+void phy_device_remove(struct phy_device *phydev);
 int phy_init_hw(struct phy_device *phydev);
 int phy_suspend(struct phy_device *phydev);
 int phy_resume(struct phy_device *phydev);
@@ -794,6 +798,7 @@
 int phy_start_interrupts(struct phy_device *phydev);
 void phy_print_status(struct phy_device *phydev);
 void phy_device_free(struct phy_device *phydev);
+int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
 
 int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
 		       int (*run)(struct phy_device *));
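A hedged sketch of what the mdiobus_register() change above means for drivers: call sites stay the same, while the wrapper now records the owning module so the MDIO/PHY core can take a module reference where it needs one. All foo_* identifiers below are invented.

static int foo_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus)
		return -ENOMEM;

	bus->name = "foo-mdio";
	bus->read = foo_mdio_read;		/* hypothetical accessors */
	bus->write = foo_mdio_write;
	snprintf(bus->id, MII_BUS_ID_SIZE, "foo-%d", pdev->id);

	/* Expands to __mdiobus_register(bus, THIS_MODULE). */
	return mdiobus_register(bus);
}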
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cab7ba5..e817722 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -34,6 +34,7 @@
 
 int dev_pm_opp_get_opp_count(struct device *dev);
 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
 
 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 					      unsigned long freq,
@@ -80,6 +81,11 @@
 	return 0;
 }
 
+static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+{
+	return NULL;
+}
+
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 					unsigned long freq, bool available)
 {
diff --git a/include/linux/random.h b/include/linux/random.h
index e651874..a75840c 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -7,6 +7,8 @@
 #define _LINUX_RANDOM_H
 
 #include <linux/list.h>
+#include <linux/once.h>
+
 #include <uapi/linux/random.h>
 
 struct random_ready_callback {
@@ -45,6 +47,10 @@
 
 u32 prandom_u32_state(struct rnd_state *state);
 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
+
+#define prandom_init_once(pcpu_state)			\
+	DO_ONCE(prandom_seed_full_state, (pcpu_state))
 
 /**
  * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
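Likewise illustrative only: one plausible way the new prandom_init_once() helper would be used to lazily seed a per-cpu generator (all foo_* names are made up).

#include <linux/percpu.h>
#include <linux/random.h>

static DEFINE_PER_CPU(struct rnd_state, foo_rnd_state);

static u32 foo_pseudo_random(void)
{
	struct rnd_state *state;
	u32 val;

	/* Seeds every CPU's state exactly once, via DO_ONCE(). */
	prandom_init_once(&foo_rnd_state);

	state = get_cpu_ptr(&foo_rnd_state);
	val = prandom_u32_state(state);
	put_cpu_ptr(&foo_rnd_state);

	return val;
}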
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ff47651..581abf8 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -230,12 +230,11 @@
 		   struct rcu_synchronize *rs_array);
 
 #define _wait_rcu_gp(checktiny, ...) \
-do { \
-	call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
-	const int __n = ARRAY_SIZE(__crcu_array); \
-	struct rcu_synchronize __rs_array[__n]; \
-	\
-	__wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \
+do {									\
+	call_rcu_func_t __crcu_array[] = { __VA_ARGS__ };		\
+	struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)];	\
+	__wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array),		\
+			__crcu_array, __rs_array);			\
 } while (0)
 
 #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 8fc0bfd..b49d413 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -296,6 +296,8 @@
 				  unsigned int *val);
 typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
 				   unsigned int val);
+typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg,
+					 unsigned int mask, unsigned int val);
 typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
 typedef void (*regmap_hw_free_context)(void *context);
 
@@ -335,6 +337,7 @@
 	regmap_hw_gather_write gather_write;
 	regmap_hw_async_write async_write;
 	regmap_hw_reg_write reg_write;
+	regmap_hw_reg_update_bits reg_update_bits;
 	regmap_hw_read read;
 	regmap_hw_reg_read reg_read;
 	regmap_hw_free_context free_context;
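Illustrative sketch of the new reg_update_bits() bus callback (all foo_* symbols are hypothetical): a bus whose hardware can do a masked read-modify-write in one transaction can now advertise that, instead of having the regmap core emulate it with separate read and write operations.

static int foo_reg_update_bits(void *context, unsigned int reg,
			       unsigned int mask, unsigned int val)
{
	struct foo_chip *chip = context;

	/* One bus transaction: hardware applies new = (old & ~mask) | (val & mask). */
	return foo_masked_write(chip, reg, mask, val);
}

static const struct regmap_bus foo_regmap_bus = {
	.reg_read = foo_reg_read,
	.reg_write = foo_reg_write,
	.reg_update_bits = foo_reg_update_bits,
};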
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 39adaa9..4be5048 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -33,11 +33,11 @@
 extern struct mutex net_mutex;
 
 #ifdef CONFIG_PROVE_LOCKING
-extern int lockdep_rtnl_is_held(void);
+extern bool lockdep_rtnl_is_held(void);
 #else
-static inline int lockdep_rtnl_is_held(void)
+static inline bool lockdep_rtnl_is_held(void)
 {
-	return 1;
+	return true;
 }
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a4ab9da..4817df5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -762,6 +762,18 @@
 	unsigned audit_tty_log_passwd;
 	struct tty_audit_buf *tty_audit_buf;
 #endif
+#ifdef CONFIG_CGROUPS
+	/*
+	 * group_rwsem prevents new tasks from entering the threadgroup and
+	 * member tasks from exiting, more specifically, setting of
+	 * PF_EXITING.  fork and exit paths are protected with this rwsem
+	 * using threadgroup_change_begin/end().  Users which require
+	 * threadgroup to remain stable should use threadgroup_[un]lock()
+	 * which also takes care of exec path.  Currently, cgroup is the
+	 * only user.
+	 */
+	struct rw_semaphore group_rwsem;
+#endif
 
 	oom_flags_t oom_flags;
 	short oom_score_adj;		/* OOM kill score adjustment */
@@ -828,7 +840,7 @@
 	struct hlist_node uidhash_node;
 	kuid_t uid;
 
-#ifdef CONFIG_PERF_EVENTS
+#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
 	atomic_long_t locked_vm;
 #endif
 };
diff --git a/include/linux/security.h b/include/linux/security.h
index 79d85dd..2f4c1f7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -946,7 +946,7 @@
 				      unsigned long arg4,
 				      unsigned long arg5)
 {
-	return cap_task_prctl(option, arg2, arg3, arg3, arg5);
+	return cap_task_prctl(option, arg2, arg3, arg4, arg5);
 }
 
 static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index adeadbd..dde00de 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -114,13 +114,18 @@
 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
 loff_t seq_lseek(struct file *, loff_t, int);
 int seq_release(struct inode *, struct file *);
-int seq_escape(struct seq_file *, const char *, const char *);
-int seq_putc(struct seq_file *m, char c);
-int seq_puts(struct seq_file *m, const char *s);
 int seq_write(struct seq_file *seq, const void *data, size_t len);
 
-__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
-__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);
+__printf(2, 0)
+void seq_vprintf(struct seq_file *m, const char *fmt, va_list args);
+__printf(2, 3)
+void seq_printf(struct seq_file *m, const char *fmt, ...);
+void seq_putc(struct seq_file *m, char c);
+void seq_puts(struct seq_file *m, const char *s);
+void seq_put_decimal_ull(struct seq_file *m, char delimiter,
+			 unsigned long long num);
+void seq_put_decimal_ll(struct seq_file *m, char delimiter, long long num);
+void seq_escape(struct seq_file *m, const char *s, const char *esc);
 
 void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
 		  int rowsize, int groupsize, const void *buf, size_t len,
@@ -138,10 +143,6 @@
 void *__seq_open_private(struct file *, const struct seq_operations *, int);
 int seq_open_private(struct file *, const struct seq_operations *, int);
 int seq_release_private(struct inode *, struct file *);
-int seq_put_decimal_ull(struct seq_file *m, char delimiter,
-			unsigned long long num);
-int seq_put_decimal_ll(struct seq_file *m, char delimiter,
-			long long num);
 
 static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
 {
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2738d35..4398411 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -179,6 +179,9 @@
 	u8			bridged_dnat:1;
 	__u16			frag_max_size;
 	struct net_device	*physindev;
+
+	/* always valid & non-NULL from FORWARD on, for physdev match */
+	struct net_device	*physoutdev;
 	union {
 		/* prerouting: detect dnat in orig/reply direction */
 		__be32          ipv4_daddr;
@@ -189,9 +192,6 @@
 		 * skb is out in neigh layer.
 		 */
 		char neigh_header[8];
-
-		/* always valid & non-NULL from FORWARD on, for physdev match */
-		struct net_device *physoutdev;
 	};
 };
 #endif
@@ -2707,6 +2707,9 @@
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
+	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+		 skb_checksum_start_offset(skb) < 0)
+		skb->ip_summed = CHECKSUM_NONE;
 }
 
 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 269e8af..6b00f18 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -34,7 +34,7 @@
 
 /**
  * struct spi_statistics - statistics for spi transfers
- * @clock:         lock protecting this structure
+ * @lock:          lock protecting this structure
  *
  * @messages:      number of spi-messages handled
  * @transfers:     number of spi_transfers handled
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 7591788..357e44c 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -42,6 +42,7 @@
 	/*
 	 * Connection of transports
 	 */
+	unsigned long		sock_state;
 	struct delayed_work	connect_worker;
 	struct sockaddr_storage	srcaddr;
 	unsigned short		srcport;
@@ -76,6 +77,8 @@
  */
 #define TCP_RPC_REPLY		(1UL << 6)
 
+#define XPRT_SOCK_CONNECTING	1U
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_SUNRPC_XPRTSOCK_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 0800131..a460e2e 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -885,4 +885,6 @@
 			const char __user *const __user *argv,
 			const char __user *const __user *envp, int flags);
 
+asmlinkage long sys_membarrier(int cmd, int flags);
+
 #endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 937b978..86a7eda 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -112,11 +112,11 @@
 struct tcp_request_sock {
 	struct inet_request_sock 	req;
 	const struct tcp_request_sock_ops *af_specific;
+	struct skb_mstamp		snt_synack; /* first SYNACK sent time */
 	bool				tfo_listener;
 	u32				txhash;
 	u32				rcv_isn;
 	u32				snt_isn;
-	u32				snt_synack; /* synack sent time */
 	u32				last_oow_ack_time; /* last SYNACK */
 	u32				rcv_nxt; /* the ack # by SYNACK. For
 						  * FastOpen it's the seq#
@@ -356,8 +356,8 @@
 
 struct tcp_timewait_sock {
 	struct inet_timewait_sock tw_sk;
-	u32			  tw_rcv_nxt;
-	u32			  tw_snd_nxt;
+#define tw_rcv_nxt tw_sk.__tw_common.skc_tw_rcv_nxt
+#define tw_snd_nxt tw_sk.__tw_common.skc_tw_snd_nxt
 	u32			  tw_rcv_wnd;
 	u32			  tw_ts_offset;
 	u32			  tw_ts_recent;
@@ -382,25 +382,11 @@
 		tcp_sk(sk)->fastopen_rsk != NULL);
 }
 
-extern void tcp_sock_destruct(struct sock *sk);
-
-static inline int fastopen_init_queue(struct sock *sk, int backlog)
+static inline void fastopen_queue_tune(struct sock *sk, int backlog)
 {
-	struct request_sock_queue *queue =
-	    &inet_csk(sk)->icsk_accept_queue;
+	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 
-	if (queue->fastopenq == NULL) {
-		queue->fastopenq = kzalloc(
-		    sizeof(struct fastopen_queue),
-		    sk->sk_allocation);
-		if (queue->fastopenq == NULL)
-			return -ENOMEM;
-
-		sk->sk_destruct = tcp_sock_destruct;
-		spin_lock_init(&queue->fastopenq->lock);
-	}
-	queue->fastopenq->max_qlen = backlog;
-	return 0;
+	queue->fastopenq.max_qlen = backlog;
 }
 
 static inline void tcp_saved_syn_free(struct tcp_sock *tp)
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 037e9df..157d366 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -92,23 +92,19 @@
 		     struct thermal_cooling_device *);
 	int (*unbind) (struct thermal_zone_device *,
 		       struct thermal_cooling_device *);
-	int (*get_temp) (struct thermal_zone_device *, unsigned long *);
+	int (*get_temp) (struct thermal_zone_device *, int *);
 	int (*get_mode) (struct thermal_zone_device *,
 			 enum thermal_device_mode *);
 	int (*set_mode) (struct thermal_zone_device *,
 		enum thermal_device_mode);
 	int (*get_trip_type) (struct thermal_zone_device *, int,
 		enum thermal_trip_type *);
-	int (*get_trip_temp) (struct thermal_zone_device *, int,
-			      unsigned long *);
-	int (*set_trip_temp) (struct thermal_zone_device *, int,
-			      unsigned long);
-	int (*get_trip_hyst) (struct thermal_zone_device *, int,
-			      unsigned long *);
-	int (*set_trip_hyst) (struct thermal_zone_device *, int,
-			      unsigned long);
-	int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
-	int (*set_emul_temp) (struct thermal_zone_device *, unsigned long);
+	int (*get_trip_temp) (struct thermal_zone_device *, int, int *);
+	int (*set_trip_temp) (struct thermal_zone_device *, int, int);
+	int (*get_trip_hyst) (struct thermal_zone_device *, int, int *);
+	int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
+	int (*get_crit_temp) (struct thermal_zone_device *, int *);
+	int (*set_emul_temp) (struct thermal_zone_device *, int);
 	int (*get_trend) (struct thermal_zone_device *, int,
 			  enum thermal_trend *);
 	int (*notify) (struct thermal_zone_device *, int,
@@ -332,9 +328,9 @@
  *		   temperature.
  */
 struct thermal_zone_of_device_ops {
-	int (*get_temp)(void *, long *);
+	int (*get_temp)(void *, int *);
 	int (*get_trend)(void *, long *);
-	int (*set_emul_temp)(void *, unsigned long);
+	int (*set_emul_temp)(void *, int);
 };
 
 /**
@@ -364,7 +360,7 @@
 thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
 				const struct thermal_zone_of_device_ops *ops)
 {
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 
 static inline
@@ -384,6 +380,8 @@
 
 int power_actor_get_max_power(struct thermal_cooling_device *,
 			      struct thermal_zone_device *tz, u32 *max_power);
+int power_actor_get_min_power(struct thermal_cooling_device *,
+			      struct thermal_zone_device *tz, u32 *min_power);
 int power_actor_set_power(struct thermal_cooling_device *,
 			  struct thermal_instance *, u32);
 struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
@@ -406,7 +404,7 @@
 				   const struct thermal_cooling_device_ops *);
 void thermal_cooling_device_unregister(struct thermal_cooling_device *);
 struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
-int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp);
+int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
 
 int get_tz_trend(struct thermal_zone_device *, int);
 struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
@@ -419,6 +417,10 @@
 static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev,
 			      struct thermal_zone_device *tz, u32 *max_power)
 { return 0; }
+static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev,
+					    struct thermal_zone_device *tz,
+					    u32 *min_power)
+{ return -ENODEV; }
 static inline int power_actor_set_power(struct thermal_cooling_device *cdev,
 			  struct thermal_instance *tz, u32 power)
 { return 0; }
@@ -457,7 +459,7 @@
 		const char *name)
 { return ERR_PTR(-ENODEV); }
 static inline int thermal_zone_get_temp(
-		struct thermal_zone_device *tz, unsigned long *temp)
+		struct thermal_zone_device *tz, int *temp)
 { return -ENODEV; }
 static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
 { return -ENODEV; }
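The thermal callback changes above replace unsigned long temperatures with plain int (still millidegrees Celsius), so below-zero readings can be reported. A non-authoritative sketch of a sensor using the new of-thermal prototype; the foo_* names and the ADC conversion are invented.

static int foo_get_temp(void *data, int *temp)
{
	struct foo_sensor *sensor = data;
	int raw = foo_read_adc(sensor);

	/* Negative millicelsius values now fit in the int result. */
	*temp = raw * 125 - 40000;
	return 0;
}

static const struct thermal_zone_of_device_ops foo_thermal_ops = {
	.get_temp = foo_get_temp,
};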
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 48d901f..e312219 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -147,11 +147,20 @@
 		cpumask_or(mask, mask, tick_nohz_full_mask);
 }
 
+static inline int housekeeping_any_cpu(void)
+{
+	return cpumask_any_and(housekeeping_mask, cpu_online_mask);
+}
+
 extern void tick_nohz_full_kick(void);
 extern void tick_nohz_full_kick_cpu(int cpu);
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(void);
 #else
+static inline int housekeeping_any_cpu(void)
+{
+	return smp_processor_id();
+}
 static inline bool tick_nohz_full_enabled(void) { return false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
diff --git a/include/linux/wait.h b/include/linux/wait.h
index d3d0772..1e1bf9f 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -147,8 +147,7 @@
 
 typedef int wait_bit_action_f(struct wait_bit_key *);
 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr,
-			  void *key);
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
@@ -180,7 +179,7 @@
 #define wake_up_poll(x, m)						\
 	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
 #define wake_up_locked_poll(x, m)					\
-	__wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m))
+	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
 #define wake_up_interruptible_poll(x, m)				\
 	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
 #define wake_up_interruptible_sync_poll(x, m)				\
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index 9f36641..6513c7e 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -15,6 +15,7 @@
 #define _MEDIA_VIDEOBUF2_MEMOPS_H
 
 #include <media/videobuf2-core.h>
+#include <linux/mm.h>
 
 /**
  * struct vb2_vmarea_handler - common vma refcount tracking handler
@@ -31,11 +32,9 @@
 
 extern const struct vm_operations_struct vb2_common_vm_ops;
 
-int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
-			   struct vm_area_struct **res_vma, dma_addr_t *res_pa);
-
-struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma);
-void vb2_put_vma(struct vm_area_struct *vma);
-
+struct frame_vector *vb2_create_framevec(unsigned long start,
+					 unsigned long length,
+					 bool write);
+void vb2_destroy_framevec(struct frame_vector *vec);
 
 #endif
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index a2f59ec..07db532 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -61,6 +61,16 @@
 #define UIP_PROTO_UDP			17 /* ipv6 next header value for UDP */
 #define UIP_FRAGH_LEN			8  /* ipv6 fragment header size */
 
+#define EUI64_ADDR_LEN		8
+
+#define LOWPAN_NHC_MAX_ID_LEN	1
+/* Max IPHC Header len without IPv6 hdr specific inline data.
+ * Useful for getting the "extra" bytes we need at worst case compression.
+ *
+ * LOWPAN_IPHC + CID + LOWPAN_NHC_MAX_ID_LEN
+ */
+#define LOWPAN_IPHC_MAX_HEADER_LEN	(2 + 1 + LOWPAN_NHC_MAX_ID_LEN)
+
 /*
  * ipv6 address based on mac
  * second bit-flip (Universe/Local) is done according RFC2464
@@ -126,13 +136,19 @@
 	 (((a)[6]) == 0xFF) &&	\
 	 (((a)[7]) == 0xFF))
 
-#define LOWPAN_DISPATCH_IPV6	0x41 /* 01000001 = 65 */
-#define LOWPAN_DISPATCH_HC1	0x42 /* 01000010 = 66 */
-#define LOWPAN_DISPATCH_IPHC	0x60 /* 011xxxxx = ... */
-#define LOWPAN_DISPATCH_FRAG1	0xc0 /* 11000xxx */
-#define LOWPAN_DISPATCH_FRAGN	0xe0 /* 11100xxx */
+#define LOWPAN_DISPATCH_IPV6		0x41 /* 01000001 = 65 */
+#define LOWPAN_DISPATCH_IPHC		0x60 /* 011xxxxx = ... */
+#define LOWPAN_DISPATCH_IPHC_MASK	0xe0
 
-#define LOWPAN_DISPATCH_MASK	0xf8 /* 11111000 */
+static inline bool lowpan_is_ipv6(u8 dispatch)
+{
+	return dispatch == LOWPAN_DISPATCH_IPV6;
+}
+
+static inline bool lowpan_is_iphc(u8 dispatch)
+{
+	return (dispatch & LOWPAN_DISPATCH_IPHC_MASK) == LOWPAN_DISPATCH_IPHC;
+}
 
 #define LOWPAN_FRAG_TIMEOUT	(HZ * 60)	/* time-out 60 sec */
 
@@ -218,6 +234,19 @@
 	return netdev_priv(dev);
 }
 
+struct lowpan_802154_cb {
+	u16 d_tag;
+	unsigned int d_size;
+	u8 d_offset;
+};
+
+static inline
+struct lowpan_802154_cb *lowpan_802154_cb(const struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(skb->cb));
+	return (struct lowpan_802154_cb *)skb->cb;
+}
+
 #ifdef DEBUG
 /* print data in line */
 static inline void raw_dump_inline(const char *caller, char *msg,
@@ -280,119 +309,6 @@
 	*hc_ptr += len;
 }
 
-static inline u8 lowpan_addr_mode_size(const u8 addr_mode)
-{
-	static const u8 addr_sizes[] = {
-		[LOWPAN_IPHC_ADDR_00] = 16,
-		[LOWPAN_IPHC_ADDR_01] = 8,
-		[LOWPAN_IPHC_ADDR_02] = 2,
-		[LOWPAN_IPHC_ADDR_03] = 0,
-	};
-	return addr_sizes[addr_mode];
-}
-
-static inline u8 lowpan_next_hdr_size(const u8 h_enc, u16 *uncomp_header)
-{
-	u8 ret = 1;
-
-	if ((h_enc & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
-		*uncomp_header += sizeof(struct udphdr);
-
-		switch (h_enc & LOWPAN_NHC_UDP_CS_P_11) {
-		case LOWPAN_NHC_UDP_CS_P_00:
-			ret += 4;
-			break;
-		case LOWPAN_NHC_UDP_CS_P_01:
-		case LOWPAN_NHC_UDP_CS_P_10:
-			ret += 3;
-			break;
-		case LOWPAN_NHC_UDP_CS_P_11:
-			ret++;
-			break;
-		default:
-			break;
-		}
-
-		if (!(h_enc & LOWPAN_NHC_UDP_CS_C))
-			ret += 2;
-	}
-
-	return ret;
-}
-
-/**
- *	lowpan_uncompress_size - returns skb->len size with uncompressed header
- *	@skb: sk_buff with 6lowpan header inside
- *	@datagram_offset: optional to get the datagram_offset value
- *
- *	Returns the skb->len with uncompressed header
- */
-static inline u16
-lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset)
-{
-	u16 ret = 2, uncomp_header = sizeof(struct ipv6hdr);
-	u8 iphc0, iphc1, h_enc;
-
-	iphc0 = skb_network_header(skb)[0];
-	iphc1 = skb_network_header(skb)[1];
-
-	switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
-	case 0:
-		ret += 4;
-		break;
-	case 1:
-		ret += 3;
-		break;
-	case 2:
-		ret++;
-		break;
-	default:
-		break;
-	}
-
-	if (!(iphc0 & LOWPAN_IPHC_NH_C))
-		ret++;
-
-	if (!(iphc0 & 0x03))
-		ret++;
-
-	ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_SAM) >>
-				     LOWPAN_IPHC_SAM_BIT);
-
-	if (iphc1 & LOWPAN_IPHC_M) {
-		switch ((iphc1 & LOWPAN_IPHC_DAM_11) >>
-			LOWPAN_IPHC_DAM_BIT) {
-		case LOWPAN_IPHC_DAM_00:
-			ret += 16;
-			break;
-		case LOWPAN_IPHC_DAM_01:
-			ret += 6;
-			break;
-		case LOWPAN_IPHC_DAM_10:
-			ret += 4;
-			break;
-		case LOWPAN_IPHC_DAM_11:
-			ret++;
-			break;
-		default:
-			break;
-		}
-	} else {
-		ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_DAM_11) >>
-					     LOWPAN_IPHC_DAM_BIT);
-	}
-
-	if (iphc0 & LOWPAN_IPHC_NH_C) {
-		h_enc = skb_network_header(skb)[ret];
-		ret += lowpan_next_hdr_size(h_enc, &uncomp_header);
-	}
-
-	if (dgram_offset)
-		*dgram_offset = uncomp_header;
-
-	return skb->len + uncomp_header - ret;
-}
-
 void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
 
 int
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index b5474b1..78003df 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -192,8 +192,7 @@
 	int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
 			       struct dst_entry **dst, struct flowi6 *fl6);
 	void (*udpv6_encap_enable)(void);
-	void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh,
-			      const struct in6_addr *daddr,
+	void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr,
 			      const struct in6_addr *solicited_addr,
 			      bool router, bool solicited, bool override, bool inc_opt);
 	struct neigh_table *nd_tbl;
diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h
index 7d38e2f..a5563d2 100644
--- a/include/net/af_ieee802154.h
+++ b/include/net/af_ieee802154.h
@@ -1,5 +1,5 @@
 /*
- * IEEE 802.15.4 inteface for userspace
+ * IEEE 802.15.4 interface for userspace
  *
  * Copyright 2007, 2008 Siemens AG
  *
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 4a167b3..cb1b9bb 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -63,7 +63,11 @@
 #define UNIX_GC_MAYBE_CYCLE	1
 	struct socket_wq	peer_wq;
 };
-#define unix_sk(__sk) ((struct unix_sock *)__sk)
+
+static inline struct unix_sock *unix_sk(struct sock *sk)
+{
+	return (struct unix_sock *)sk;
+}
 
 #define peer_wait peer_wq.wait
 
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 38d8a34..c4defef 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -122,12 +122,28 @@
 __printf(1, 2)
 void bt_info(const char *fmt, ...);
 __printf(1, 2)
+void bt_warn(const char *fmt, ...);
+__printf(1, 2)
 void bt_err(const char *fmt, ...);
+__printf(1, 2)
+void bt_err_ratelimited(const char *fmt, ...);
 
 #define BT_INFO(fmt, ...)	bt_info(fmt "\n", ##__VA_ARGS__)
+#define BT_WARN(fmt, ...)	bt_warn(fmt "\n", ##__VA_ARGS__)
 #define BT_ERR(fmt, ...)	bt_err(fmt "\n", ##__VA_ARGS__)
 #define BT_DBG(fmt, ...)	pr_debug(fmt "\n", ##__VA_ARGS__)
 
+#define BT_ERR_RATELIMITED(fmt, ...) bt_err_ratelimited(fmt "\n", ##__VA_ARGS__)
+
+#define bt_dev_info(hdev, fmt, ...)				\
+	BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_warn(hdev, fmt, ...)				\
+	BT_WARN("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_err(hdev, fmt, ...)				\
+	BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_dbg(hdev, fmt, ...)				\
+	BT_DBG("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+
 /* Connection and socket states */
 enum {
 	BT_CONNECTED = 1, /* Equal to TCP_ESTABLISHED to make net code happy */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 7ca6690..a26ff28 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -44,6 +44,8 @@
 #define HCI_DEV_DOWN			4
 #define HCI_DEV_SUSPEND			5
 #define HCI_DEV_RESUME			6
+#define HCI_DEV_OPEN			7
+#define HCI_DEV_CLOSE			8
 
 /* HCI notify events */
 #define HCI_NOTIFY_CONN_ADD		1
@@ -238,6 +240,7 @@
 	HCI_LE_SCAN_INTERRUPTED,
 
 	HCI_DUT_MODE,
+	HCI_VENDOR_DIAG,
 	HCI_FORCE_BREDR_SMP,
 	HCI_FORCE_STATIC_ADDR,
 
@@ -260,6 +263,7 @@
 #define HCI_ACLDATA_PKT		0x02
 #define HCI_SCODATA_PKT		0x03
 #define HCI_EVENT_PKT		0x04
+#define HCI_DIAG_PKT		0xf0
 #define HCI_VENDOR_PKT		0xff
 
 /* HCI packet types */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 9e1a59e..f28470e 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -398,6 +398,7 @@
 	int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
 	void (*notify)(struct hci_dev *hdev, unsigned int evt);
 	void (*hw_error)(struct hci_dev *hdev, u8 code);
+	int (*set_diag)(struct hci_dev *hdev, bool enable);
 	int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
 };
 
@@ -987,6 +988,7 @@
 int hci_reset_dev(struct hci_dev *hdev);
 int hci_dev_open(__u16 dev);
 int hci_dev_close(__u16 dev);
+int hci_dev_do_close(struct hci_dev *hdev);
 int hci_dev_reset(__u16 dev);
 int hci_dev_reset_stat(__u16 dev);
 int hci_dev_cmd(unsigned int cmd, void __user *arg);
@@ -1065,6 +1067,7 @@
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
 
 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
+int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
 
 void hci_init_sysfs(struct hci_dev *hdev);
 void hci_conn_init_sysfs(struct hci_conn *conn);
@@ -1348,6 +1351,9 @@
 
 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
 
+struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+			     const void *param, u32 timeout);
+
 /* ----- HCI Sockets ----- */
 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 77d1e57..2b67567 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -39,6 +39,10 @@
 #define HCI_MON_ACL_RX_PKT	5
 #define HCI_MON_SCO_TX_PKT	6
 #define HCI_MON_SCO_RX_PKT	7
+#define HCI_MON_OPEN_INDEX	8
+#define HCI_MON_CLOSE_INDEX	9
+#define HCI_MON_INDEX_INFO	10
+#define HCI_MON_VENDOR_DIAG	11
 
 struct hci_mon_new_index {
 	__u8		type;
@@ -48,4 +52,10 @@
 } __packed;
 #define HCI_MON_NEW_INDEX_SIZE 16
 
+struct hci_mon_index_info {
+	bdaddr_t	bdaddr;
+	__le16		manufacturer;
+} __packed;
+#define HCI_MON_INDEX_INFO_SIZE 8
+
 #endif /* __HCI_MON_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f0889a2..90332a1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -858,6 +858,8 @@
 /**
  * enum cfg80211_station_type - the type of station being modified
  * @CFG80211_STA_AP_CLIENT: client of an AP interface
+ * @CFG80211_STA_AP_CLIENT_UNASSOC: client of an AP interface that is still
+ *	unassociated (updating properties for this type of client is permitted)
  * @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has
  *	the AP MLME in the device
  * @CFG80211_STA_AP_STA: AP station on managed interface
@@ -873,6 +875,7 @@
  */
 enum cfg80211_station_type {
 	CFG80211_STA_AP_CLIENT,
+	CFG80211_STA_AP_CLIENT_UNASSOC,
 	CFG80211_STA_AP_MLME_CLIENT,
 	CFG80211_STA_AP_STA,
 	CFG80211_STA_IBSS,
@@ -2971,12 +2974,21 @@
  * @doit: callback for the operation, note that wdev is %NULL if the
  *	flags didn't ask for a wdev and non-%NULL otherwise; the data
  *	pointer may be %NULL if userspace provided no data at all
+ * @dumpit: dump callback, for transferring bigger/multiple items. The
+ *	@storage points to cb->args[5], i.e. it is preserved over the
+ *	multiple dumpit calls.
+ * It's recommended to not have the same sub command with both @doit and
+ * @dumpit, so that userspace can assume certain ones are get and others
+ * are used with dump requests.
  */
 struct wiphy_vendor_command {
 	struct nl80211_vendor_cmd_info info;
 	u32 flags;
 	int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev,
 		    const void *data, int data_len);
+	int (*dumpit)(struct wiphy *wiphy, struct wireless_dev *wdev,
+		      struct sk_buff *skb, const void *data, int data_len,
+		      unsigned long *storage);
 };
 
 /**
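Hedged illustration of the new vendor-command dumpit hook (everything foo_* and the vendor/subcmd numbers are invented, and the exact continuation protocol enforced by nl80211 is not modelled here). The one documented guarantee used below is that *storage aliases cb->args[5], so a cursor kept there persists across the dumpit calls of a single dump request.

static int foo_vendor_dumpit(struct wiphy *wiphy, struct wireless_dev *wdev,
			     struct sk_buff *skb, const void *data,
			     int data_len, unsigned long *storage)
{
	unsigned long next = *storage;	/* cursor kept in cb->args[5] */

	/* ... put the record identified by 'next' into skb ... */

	*storage = next + 1;
	return 0;	/* assumed: real callbacks follow nl80211's dump rules */
}

static const struct wiphy_vendor_command foo_vendor_cmds[] = {
	{
		.info = { .vendor_id = 0x001234, .subcmd = 1 },
		.dumpit = foo_vendor_dumpit,
	},
};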
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 76b1ffa..171cd76 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -27,6 +27,16 @@
 struct wpan_phy;
 struct wpan_phy_cca;
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+struct ieee802154_llsec_device_key;
+struct ieee802154_llsec_seclevel;
+struct ieee802154_llsec_params;
+struct ieee802154_llsec_device;
+struct ieee802154_llsec_table;
+struct ieee802154_llsec_key_id;
+struct ieee802154_llsec_key;
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 struct cfg802154_ops {
 	struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
 							   const char *name,
@@ -65,6 +75,51 @@
 				struct wpan_dev *wpan_dev, bool mode);
 	int	(*set_ackreq_default)(struct wpan_phy *wpan_phy,
 				      struct wpan_dev *wpan_dev, bool ackreq);
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	void	(*get_llsec_table)(struct wpan_phy *wpan_phy,
+				   struct wpan_dev *wpan_dev,
+				   struct ieee802154_llsec_table **table);
+	void	(*lock_llsec_table)(struct wpan_phy *wpan_phy,
+				    struct wpan_dev *wpan_dev);
+	void	(*unlock_llsec_table)(struct wpan_phy *wpan_phy,
+				      struct wpan_dev *wpan_dev);
+	/* TODO remove locking/get table callbacks, this is part of the
+	 * nl802154 interface and should be accessible from ieee802154 layer.
+	 */
+	int	(*get_llsec_params)(struct wpan_phy *wpan_phy,
+				    struct wpan_dev *wpan_dev,
+				    struct ieee802154_llsec_params *params);
+	int	(*set_llsec_params)(struct wpan_phy *wpan_phy,
+				    struct wpan_dev *wpan_dev,
+				    const struct ieee802154_llsec_params *params,
+				    int changed);
+	int	(*add_llsec_key)(struct wpan_phy *wpan_phy,
+				 struct wpan_dev *wpan_dev,
+				 const struct ieee802154_llsec_key_id *id,
+				 const struct ieee802154_llsec_key *key);
+	int	(*del_llsec_key)(struct wpan_phy *wpan_phy,
+				 struct wpan_dev *wpan_dev,
+				 const struct ieee802154_llsec_key_id *id);
+	int	(*add_seclevel)(struct wpan_phy *wpan_phy,
+				 struct wpan_dev *wpan_dev,
+				 const struct ieee802154_llsec_seclevel *sl);
+	int	(*del_seclevel)(struct wpan_phy *wpan_phy,
+				 struct wpan_dev *wpan_dev,
+				 const struct ieee802154_llsec_seclevel *sl);
+	int	(*add_device)(struct wpan_phy *wpan_phy,
+			      struct wpan_dev *wpan_dev,
+			      const struct ieee802154_llsec_device *dev);
+	int	(*del_device)(struct wpan_phy *wpan_phy,
+			      struct wpan_dev *wpan_dev, __le64 extended_addr);
+	int	(*add_devkey)(struct wpan_phy *wpan_phy,
+			      struct wpan_dev *wpan_dev,
+			      __le64 extended_addr,
+			      const struct ieee802154_llsec_device_key *key);
+	int	(*del_devkey)(struct wpan_phy *wpan_phy,
+			      struct wpan_dev *wpan_dev,
+			      __le64 extended_addr,
+			      const struct ieee802154_llsec_device_key *key);
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 };
 
 static inline bool
@@ -167,6 +222,102 @@
 	char priv[0] __aligned(NETDEV_ALIGN);
 };
 
+struct ieee802154_addr {
+	u8 mode;
+	__le16 pan_id;
+	union {
+		__le16 short_addr;
+		__le64 extended_addr;
+	};
+};
+
+struct ieee802154_llsec_key_id {
+	u8 mode;
+	u8 id;
+	union {
+		struct ieee802154_addr device_addr;
+		__le32 short_source;
+		__le64 extended_source;
+	};
+};
+
+#define IEEE802154_LLSEC_KEY_SIZE 16
+
+struct ieee802154_llsec_key {
+	u8 frame_types;
+	u32 cmd_frame_ids;
+	/* TODO replace with NL802154_KEY_SIZE */
+	u8 key[IEEE802154_LLSEC_KEY_SIZE];
+};
+
+struct ieee802154_llsec_key_entry {
+	struct list_head list;
+
+	struct ieee802154_llsec_key_id id;
+	struct ieee802154_llsec_key *key;
+};
+
+struct ieee802154_llsec_params {
+	bool enabled;
+
+	__be32 frame_counter;
+	u8 out_level;
+	struct ieee802154_llsec_key_id out_key;
+
+	__le64 default_key_source;
+
+	__le16 pan_id;
+	__le64 hwaddr;
+	__le64 coord_hwaddr;
+	__le16 coord_shortaddr;
+};
+
+struct ieee802154_llsec_table {
+	struct list_head keys;
+	struct list_head devices;
+	struct list_head security_levels;
+};
+
+struct ieee802154_llsec_seclevel {
+	struct list_head list;
+
+	u8 frame_type;
+	u8 cmd_frame_id;
+	bool device_override;
+	u32 sec_levels;
+};
+
+struct ieee802154_llsec_device {
+	struct list_head list;
+
+	__le16 pan_id;
+	__le16 short_addr;
+	__le64 hwaddr;
+	u32 frame_counter;
+	bool seclevel_exempt;
+
+	u8 key_mode;
+	struct list_head keys;
+};
+
+struct ieee802154_llsec_device_key {
+	struct list_head list;
+
+	struct ieee802154_llsec_key_id key_id;
+	u32 frame_counter;
+};
+
+struct wpan_dev_header_ops {
+	/* TODO create callback currently assumes ieee802154_mac_cb inside
+	 * skb->cb. This should be changed to pass this information as a
+	 * parameter.
+	 */
+	int	(*create)(struct sk_buff *skb, struct net_device *dev,
+			  const struct ieee802154_addr *daddr,
+			  const struct ieee802154_addr *saddr,
+			  unsigned int len);
+};
+
 struct wpan_dev {
 	struct wpan_phy *wpan_phy;
 	int iftype;
@@ -175,6 +326,8 @@
 	struct list_head list;
 	struct net_device *netdev;
 
+	const struct wpan_dev_header_ops *header_ops;
+
 	/* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */
 	struct net_device *lowpan_dev;
 
@@ -205,6 +358,17 @@
 
 #define to_phy(_dev)	container_of(_dev, struct wpan_phy, dev)
 
+static inline int
+wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+		     const struct ieee802154_addr *daddr,
+		     const struct ieee802154_addr *saddr,
+		     unsigned int len)
+{
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+
+	return wpan_dev->header_ops->create(skb, dev, daddr, saddr, len);
+}
+
 struct wpan_phy *
 wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size);
 static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev)
diff --git a/include/net/dsa.h b/include/net/dsa.h
index b34d812..e005886 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -197,6 +197,9 @@
 		return ds->pd->rtable[dst->cpu_switch];
 }
 
+struct switchdev_trans;
+struct switchdev_obj_port_fdb;
+
 struct dsa_switch_driver {
 	struct list_head	list;
 
@@ -316,10 +319,14 @@
 	/*
 	 * Forwarding database
 	 */
+	int	(*port_fdb_prepare)(struct dsa_switch *ds, int port,
+				    const struct switchdev_obj_port_fdb *fdb,
+				    struct switchdev_trans *trans);
 	int	(*port_fdb_add)(struct dsa_switch *ds, int port,
-				const unsigned char *addr, u16 vid);
+				const struct switchdev_obj_port_fdb *fdb,
+				struct switchdev_trans *trans);
 	int	(*port_fdb_del)(struct dsa_switch *ds, int port,
-				const unsigned char *addr, u16 vid);
+				const struct switchdev_obj_port_fdb *fdb);
 	int	(*port_fdb_getnext)(struct dsa_switch *ds, int port,
 				    unsigned char *addr, u16 *vid,
 				    bool *is_static);
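The FDB ops above move to the switchdev prepare/commit model: port_fdb_prepare() may only validate and reserve resources, while port_fdb_add() does the actual programming and is expected not to fail for anything the prepare phase could have caught. A non-authoritative sketch, assuming the usual addr/vid members of switchdev_obj_port_fdb; the foo_* helpers are invented.

static int foo_port_fdb_prepare(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_fdb *fdb,
				struct switchdev_trans *trans)
{
	/* Prepare phase: check capacity only, touch no hardware. */
	if (!foo_fdb_has_room(ds))
		return -ENOSPC;
	return 0;
}

static int foo_port_fdb_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_fdb *fdb,
			    struct switchdev_trans *trans)
{
	/* Commit phase: program the entry validated during prepare. */
	return foo_fdb_write(ds, port, fdb->addr, fdb->vid);
}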
diff --git a/include/net/dst.h b/include/net/dst.h
index df0481a..1279f9b 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -45,7 +45,7 @@
 	void			*__pad1;
 #endif
 	int			(*input)(struct sk_buff *);
-	int			(*output)(struct sock *sk, struct sk_buff *skb);
+	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
 
 	unsigned short		flags;
 #define DST_HOST		0x0001
@@ -365,10 +365,10 @@
 	__skb_tunnel_rx(skb, dev, net);
 }
 
-int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
+int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 static inline int dst_discard(struct sk_buff *skb)
 {
-	return dst_discard_sk(skb->sk, skb);
+	return dst_discard_out(&init_net, skb->sk, skb);
 }
 void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
 		int initial_obsolete, unsigned short flags);
@@ -454,13 +454,9 @@
 }
 
 /* Output packet to network from transport.  */
-static inline int dst_output(struct sock *sk, struct sk_buff *skb)
+static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	return skb_dst(skb)->output(sk, skb);
-}
-static inline int dst_output_okfn(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
-	return dst_output(sk, skb);
+	return skb_dst(skb)->output(net, sk, skb);
 }
 
 /* Input packet from network to transport.  */
@@ -489,7 +485,8 @@
 #ifndef CONFIG_XFRM
 static inline struct dst_entry *xfrm_lookup(struct net *net,
 					    struct dst_entry *dst_orig,
-					    const struct flowi *fl, struct sock *sk,
+					    const struct flowi *fl,
+					    const struct sock *sk,
 					    int flags)
 {
 	return dst_orig;
@@ -498,7 +495,7 @@
 static inline struct dst_entry *xfrm_lookup_route(struct net *net,
 						  struct dst_entry *dst_orig,
 						  const struct flowi *fl,
-						  struct sock *sk,
+						  const struct sock *sk,
 						  int flags)
 {
 	return dst_orig;
@@ -511,11 +508,11 @@
 
 #else
 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
-			      const struct flowi *fl, struct sock *sk,
+			      const struct flowi *fl, const struct sock *sk,
 			      int flags);
 
 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
-				    const struct flowi *fl, struct sock *sk,
+				    const struct flowi *fl, const struct sock *sk,
 				    int flags);
 
 /* skb attached with this dst needs transformation if dst->xfrm is valid */
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index d642539..a0d443c 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -9,6 +9,7 @@
 struct net_device;
 struct sk_buff;
 struct sock;
+struct net;
 
 struct dst_ops {
 	unsigned short		family;
@@ -28,7 +29,7 @@
 					       struct sk_buff *skb, u32 mtu);
 	void			(*redirect)(struct dst_entry *dst, struct sock *sk,
 					    struct sk_buff *skb);
-	int			(*local_out)(struct sk_buff *skb);
+	int			(*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
 	struct neighbour *	(*neigh_lookup)(const struct dst_entry *dst,
 						struct sk_buff *skb,
 						const void *daddr);
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
index 2a2d6bb..bb7f467 100644
--- a/include/net/ethoc.h
+++ b/include/net/ethoc.h
@@ -17,6 +17,7 @@
 	u8 hwaddr[IFHWADDRLEN];
 	s8 phy_id;
 	u32 eth_clkfreq;
+	bool big_endian;
 };
 
 #endif /* !LINUX_NET_ETHOC_H */
diff --git a/include/net/flow.h b/include/net/flow.h
index acd6a09..83969ee 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -34,7 +34,8 @@
 	__u8	flowic_flags;
 #define FLOWI_FLAG_ANYSRC		0x01
 #define FLOWI_FLAG_KNOWN_NH		0x02
-#define FLOWI_FLAG_VRFSRC		0x04
+#define FLOWI_FLAG_L3MDEV_SRC		0x04
+#define FLOWI_FLAG_SKIP_NH_OIF		0x08
 	__u32	flowic_secid;
 	struct flowi_tunnel flowic_tun_key;
 };
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index a9af1cc..1b6b6dc 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -183,9 +183,8 @@
 					    (grps), ARRAY_SIZE(grps))
 
 int genl_unregister_family(struct genl_family *family);
-void genl_notify(struct genl_family *family,
-		 struct sk_buff *skb, struct net *net, u32 portid,
-		 u32 group, struct nlmsghdr *nlh, gfp_t flags);
+void genl_notify(struct genl_family *family, struct sk_buff *skb,
+		 struct genl_info *info, u32 group, gfp_t flags);
 
 struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
 				    gfp_t flags);
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 2c10a9f..a62a051 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -50,15 +50,6 @@
 	};
 };
 
-struct ieee802154_addr {
-	u8 mode;
-	__le16 pan_id;
-	union {
-		__le16 short_addr;
-		__le64 extended_addr;
-	};
-};
-
 struct ieee802154_hdr_fc {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
 	u16 type:3,
@@ -99,7 +90,7 @@
  * hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame
  * version, if SECEN is set.
  */
-int ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr);
+int ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr);
 
 /* pulls the entire 802.15.4 header off of the skb, including the security
  * header, and performs pan id decompression
@@ -243,38 +234,6 @@
 	return mac_cb(skb);
 }
 
-#define IEEE802154_LLSEC_KEY_SIZE 16
-
-struct ieee802154_llsec_key_id {
-	u8 mode;
-	u8 id;
-	union {
-		struct ieee802154_addr device_addr;
-		__le32 short_source;
-		__le64 extended_source;
-	};
-};
-
-struct ieee802154_llsec_key {
-	u8 frame_types;
-	u32 cmd_frame_ids;
-	u8 key[IEEE802154_LLSEC_KEY_SIZE];
-};
-
-struct ieee802154_llsec_key_entry {
-	struct list_head list;
-
-	struct ieee802154_llsec_key_id id;
-	struct ieee802154_llsec_key *key;
-};
-
-struct ieee802154_llsec_device_key {
-	struct list_head list;
-
-	struct ieee802154_llsec_key_id key_id;
-	u32 frame_counter;
-};
-
 enum {
 	IEEE802154_LLSEC_DEVKEY_IGNORE,
 	IEEE802154_LLSEC_DEVKEY_RESTRICT,
@@ -283,49 +242,6 @@
 	__IEEE802154_LLSEC_DEVKEY_MAX,
 };
 
-struct ieee802154_llsec_device {
-	struct list_head list;
-
-	__le16 pan_id;
-	__le16 short_addr;
-	__le64 hwaddr;
-	u32 frame_counter;
-	bool seclevel_exempt;
-
-	u8 key_mode;
-	struct list_head keys;
-};
-
-struct ieee802154_llsec_seclevel {
-	struct list_head list;
-
-	u8 frame_type;
-	u8 cmd_frame_id;
-	bool device_override;
-	u32 sec_levels;
-};
-
-struct ieee802154_llsec_params {
-	bool enabled;
-
-	__be32 frame_counter;
-	u8 out_level;
-	struct ieee802154_llsec_key_id out_key;
-
-	__le64 default_key_source;
-
-	__le16 pan_id;
-	__le64 hwaddr;
-	__le64 coord_hwaddr;
-	__le16 coord_shortaddr;
-};
-
-struct ieee802154_llsec_table {
-	struct list_head keys;
-	struct list_head devices;
-	struct list_head security_levels;
-};
-
 #define IEEE802154_MAC_SCAN_ED		0
 #define IEEE802154_MAC_SCAN_ACTIVE	1
 #define IEEE802154_MAC_SCAN_PASSIVE	2
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 6d539e4..064cfbe 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -25,17 +25,8 @@
 int inet6_csk_bind_conflict(const struct sock *sk,
 			    const struct inet_bind_bucket *tb, bool relax);
 
-struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6,
-				      const struct request_sock *req);
-
-struct request_sock *inet6_csk_search_req(struct sock *sk,
-					  const __be16 rport,
-					  const struct in6_addr *raddr,
-					  const struct in6_addr *laddr,
-					  const int iif);
-
-void inet6_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
-				    const unsigned long timeout);
+struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
+				      const struct request_sock *req, u8 proto);
 
 void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 0320bbb..3208a65 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -41,7 +41,7 @@
 	int	    (*rebuild_header)(struct sock *sk);
 	void	    (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
 	int	    (*conn_request)(struct sock *sk, struct sk_buff *skb);
-	struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
+	struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
 				      struct request_sock *req,
 				      struct dst_entry *dst);
 	u16	    net_header_len;
@@ -258,17 +258,14 @@
 
 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
 
-struct request_sock *inet_csk_search_req(struct sock *sk,
-					 const __be16 rport,
-					 const __be32 raddr,
-					 const __be32 laddr);
 int inet_csk_bind_conflict(const struct sock *sk,
 			   const struct inet_bind_bucket *tb, bool relax);
 int inet_csk_get_port(struct sock *sk, unsigned short snum);
 
-struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4,
+struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
 				     const struct request_sock *req);
-struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk,
+struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+					    struct sock *newsk,
 					    const struct request_sock *req);
 
 static inline void inet_csk_reqsk_queue_add(struct sock *sk,
@@ -281,8 +278,7 @@
 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 				   unsigned long timeout);
 
-static inline void inet_csk_reqsk_queue_added(struct sock *sk,
-					      const unsigned long timeout)
+static inline void inet_csk_reqsk_queue_added(struct sock *sk)
 {
 	reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
 }
@@ -299,7 +295,7 @@
 
 static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 {
-	return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
+	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
 }
 
 void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index b07d126..6683ada 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -199,12 +199,13 @@
 }
 
 /* Caller must disable local BH processing. */
-int __inet_inherit_port(struct sock *sk, struct sock *child);
+int __inet_inherit_port(const struct sock *sk, struct sock *child);
 
 void inet_put_port(struct sock *sk);
 
 void inet_hashinfo_init(struct inet_hashinfo *h);
 
+int inet_ehash_insert(struct sock *sk, struct sock *osk);
 void __inet_hash_nolisten(struct sock *sk, struct sock *osk);
 void __inet_hash(struct sock *sk, struct sock *osk);
 void inet_hash(struct sock *sk);
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 47eb67b..f5bf731 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -245,7 +245,8 @@
 }
 
 struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
-				      struct sock *sk_listener);
+				      struct sock *sk_listener,
+				      bool attach_listener);
 
 static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
 {
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 879d6e5..e581fc6 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -70,6 +70,7 @@
 #define tw_dport		__tw_common.skc_dport
 #define tw_num			__tw_common.skc_num
 #define tw_cookie		__tw_common.skc_cookie
+#define tw_dr			__tw_common.skc_tw_dr
 
 	int			tw_timeout;
 	volatile unsigned char	tw_substate;
@@ -88,7 +89,6 @@
 	kmemcheck_bitfield_end(flags);
 	struct timer_list	tw_timer;
 	struct inet_bind_bucket	*tw_tb;
-	struct inet_timewait_death_row *tw_dr;
 };
 #define tw_tclass tw_tos
 
@@ -110,7 +110,19 @@
 void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 			   struct inet_hashinfo *hashinfo);
 
-void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
+void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
+			  bool rearm);
+
+static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
+{
+	__inet_twsk_schedule(tw, timeo, false);
+}
+
+static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
+{
+	__inet_twsk_schedule(tw, timeo, true);
+}
+
 void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
diff --git a/include/net/ip.h b/include/net/ip.h
index 9b9ca28..1a98f1c 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -100,24 +100,20 @@
  *	Functions provided by ip.c
  */
 
-int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
 			  __be32 saddr, __be32 daddr,
 			  struct ip_options_rcu *opt);
 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 	   struct net_device *orig_dev);
 int ip_local_deliver(struct sk_buff *skb);
 int ip_mr_input(struct sk_buff *skb);
-int ip_output(struct sock *sk, struct sk_buff *skb);
-int ip_mc_output(struct sock *sk, struct sk_buff *skb);
-int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
-		   int (*output)(struct sock *, struct sk_buff *));
+int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+		   int (*output)(struct net *, struct sock *, struct sk_buff *));
 void ip_send_check(struct iphdr *ip);
-int __ip_local_out(struct sk_buff *skb);
-int ip_local_out_sk(struct sock *sk, struct sk_buff *skb);
-static inline int ip_local_out(struct sk_buff *skb)
-{
-	return ip_local_out_sk(skb->sk, skb);
-}
+int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 
 int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
 void ip_init(void);
@@ -282,10 +278,12 @@
 }
 
 static inline
-int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
+int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
 {
-	return  inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO ||
-		(inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT &&
+	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
+
+	return  pmtudisc == IP_PMTUDISC_DO ||
+		(pmtudisc == IP_PMTUDISC_WANT &&
 		 !(dst_metric_locked(dst, RTAX_MTU)));
 }
 
@@ -321,12 +319,15 @@
 
 static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
 {
-	if (!skb->sk || ip_sk_use_pmtu(skb->sk)) {
+	struct sock *sk = skb->sk;
+
+	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
 		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
+
 		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
-	} else {
-		return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
 	}
+
+	return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
 }
 
 u32 ip_idents_reserve(u32 hash, int segs);
@@ -505,11 +506,11 @@
 	return user >= lower_bond && user <= upper_bond;
 }
 
-int ip_defrag(struct sk_buff *skb, u32 user);
+int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
 #ifdef CONFIG_INET
-struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
+struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
 #else
-static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
 {
 	return skb;
 }
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 063d304..aaf9700 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -275,7 +275,8 @@
 	     struct nl_info *info, struct mx6_config *mxc);
 int fib6_del(struct rt6_info *rt, struct nl_info *info);
 
-void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info);
+void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
+		     unsigned int flags);
 
 void fib6_run_gc(unsigned long expires, struct net *net, bool force);
 
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 297629a..2bfb2ad 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -173,8 +173,8 @@
 		 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
 }
 
-int ip6_fragment(struct sock *sk, struct sk_buff *skb,
-		 int (*output)(struct sock *, struct sk_buff *));
+int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+		 int (*output)(struct net *, struct sock *, struct sk_buff *));
 
 static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
 {
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index b8529aa..aaee6fa 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -32,6 +32,12 @@
 	__be32			o_key;
 };
 
+struct ip6_tnl_dst {
+	seqlock_t lock;
+	struct dst_entry __rcu *dst;
+	u32 cookie;
+};
+
 /* IPv6 tunnel */
 struct ip6_tnl {
 	struct ip6_tnl __rcu *next;	/* next tunnel in list */
@@ -39,8 +45,7 @@
 	struct net *net;	/* netns for packet i/o */
 	struct __ip6_tnl_parm parms;	/* tunnel configuration parameters */
 	struct flowi fl;	/* flowi template for xmit */
-	struct dst_entry *dst_cache;    /* cached dst */
-	u32 dst_cookie;
+	struct ip6_tnl_dst __percpu *dst_cache;	/* cached dst */
 
 	int err_count;
 	unsigned long err_time;
@@ -60,9 +65,11 @@
 	__u8 encap_limit;	/* tunnel encapsulation limit   */
 } __packed;
 
-struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t);
+struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t);
+int ip6_tnl_dst_init(struct ip6_tnl *t);
+void ip6_tnl_dst_destroy(struct ip6_tnl *t);
 void ip6_tnl_dst_reset(struct ip6_tnl *t);
-void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
+void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst);
 int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
 		const struct in6_addr *raddr);
 int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
@@ -79,8 +86,8 @@
 	struct net_device_stats *stats = &dev->stats;
 	int pkt_len, err;
 
-	pkt_len = skb->len;
-	err = ip6_local_out_sk(sk, skb);
+	pkt_len = skb->len - skb_inner_network_offset(skb);
+	err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
 
 	if (net_xmit_eval(err) == 0) {
 		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index a37d043..ac5c6e8 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -79,7 +79,7 @@
 	unsigned char		nh_scope;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	int			nh_weight;
-	int			nh_power;
+	atomic_t		nh_upper_bound;
 #endif
 #ifdef CONFIG_IP_ROUTE_CLASSID
 	__u32			nh_tclassid;
@@ -118,7 +118,7 @@
 #define fib_advmss fib_metrics[RTAX_ADVMSS-1]
 	int			fib_nhs;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-	int			fib_power;
+	int			fib_weight;
 #endif
 	struct rcu_head		rcu;
 	struct fib_nh		fib_nh[0];
@@ -236,8 +236,11 @@
 	rcu_read_lock();
 
 	tb = fib_get_table(net, RT_TABLE_MAIN);
-	if (tb && !fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF))
-		err = 0;
+	if (tb)
+		err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF);
+
+	if (err == -EAGAIN)
+		err = -ENETUNREACH;
 
 	rcu_read_unlock();
 
@@ -258,7 +261,7 @@
 			     struct fib_result *res, unsigned int flags)
 {
 	struct fib_table *tb;
-	int err;
+	int err = -ENETUNREACH;
 
 	flags |= FIB_LOOKUP_NOREF;
 	if (net->ipv4.fib_has_custom_rules)
@@ -268,15 +271,20 @@
 
 	res->tclassid = 0;
 
-	for (err = 0; !err; err = -ENETUNREACH) {
-		tb = rcu_dereference_rtnl(net->ipv4.fib_main);
-		if (tb && !fib_table_lookup(tb, flp, res, flags))
-			break;
+	tb = rcu_dereference_rtnl(net->ipv4.fib_main);
+	if (tb)
+		err = fib_table_lookup(tb, flp, res, flags);
 
-		tb = rcu_dereference_rtnl(net->ipv4.fib_default);
-		if (tb && !fib_table_lookup(tb, flp, res, flags))
-			break;
-	}
+	if (!err)
+		goto out;
+
+	tb = rcu_dereference_rtnl(net->ipv4.fib_default);
+	if (tb)
+		err = fib_table_lookup(tb, flp, res, flags);
+
+out:
+	if (err == -EAGAIN)
+		err = -ENETUNREACH;
 
 	rcu_read_unlock();
 
@@ -312,7 +320,17 @@
 int fib_sync_down_dev(struct net_device *dev, unsigned long event);
 int fib_sync_down_addr(struct net *net, __be32 local);
 int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
-void fib_select_multipath(struct fib_result *res);
+
+extern u32 fib_multipath_secret __read_mostly;
+
+static inline int fib_multipath_hash(__be32 saddr, __be32 daddr)
+{
+	return jhash_2words(saddr, daddr, fib_multipath_secret) >> 1;
+}
+
+void fib_select_multipath(struct fib_result *res, int hash);
+void fib_select_path(struct net *net, struct fib_result *res,
+		     struct flowi4 *fl4, int mp_hash);
 
 /* Exported by fib_trie.c */
 void fib_trie_init(void);
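
The two hunks above replace the old power/round-robin bookkeeping with hash-threshold next-hop selection: fib_multipath_hash() folds saddr/daddr into a 31-bit value and each nexthop carries an upper bound derived from its weight. A minimal sketch of how a selector could consume those pieces follows; the function name, the loop body and the use of res->nh_sel are illustrative, not lifted from this patch.

/* Sketch only: pick the first nexthop whose precomputed upper bound
 * covers the flow hash produced by fib_multipath_hash() above.
 */
static void example_select_multipath(struct fib_result *res, int hash)
{
	struct fib_info *fi = res->fi;
	int nhsel;

	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
		const struct fib_nh *nh = &fi->fib_nh[nhsel];

		if (hash > atomic_read(&nh->nh_upper_bound))
			continue;	/* hash falls above this nexthop's share */

		res->nh_sel = nhsel;
		return;
	}
	/* no bound matched (e.g. all nexthops down): leave res->nh_sel alone */
}
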
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 9a6a3ba..f6dafec 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -276,6 +276,8 @@
 int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 		  __be32 src, __be32 dst, u8 proto,
 		  u8 tos, u8 ttl, __be16 df, bool xnet);
+struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
+					     gfp_t flags);
 
 struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
 					 int gso_type_mask);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 9b9ca87..0816c87 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -29,65 +29,15 @@
 #endif
 #include <net/net_namespace.h>		/* Netw namespace */
 
+#define IP_VS_HDR_INVERSE	1
+#define IP_VS_HDR_ICMP		2
+
 /* Generic access of ipvs struct */
 static inline struct netns_ipvs *net_ipvs(struct net* net)
 {
 	return net->ipvs;
 }
 
-/* Get net ptr from skb in traffic cases
- * use skb_sknet when call is from userland (ioctl or netlink)
- */
-static inline struct net *skb_net(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_NS
-#ifdef CONFIG_IP_VS_DEBUG
-	/*
-	 * This is used for debug only.
-	 * Start with the most likely hit
-	 * End with BUG
-	 */
-	if (likely(skb->dev && dev_net(skb->dev)))
-		return dev_net(skb->dev);
-	if (skb_dst(skb) && skb_dst(skb)->dev)
-		return dev_net(skb_dst(skb)->dev);
-	WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
-		      __func__, __LINE__);
-	if (likely(skb->sk && sock_net(skb->sk)))
-		return sock_net(skb->sk);
-	pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
-		__func__, __LINE__);
-	BUG();
-#else
-	return dev_net(skb->dev ? : skb_dst(skb)->dev);
-#endif
-#else
-	return &init_net;
-#endif
-}
-
-static inline struct net *skb_sknet(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_NS
-#ifdef CONFIG_IP_VS_DEBUG
-	/* Start with the most likely hit */
-	if (likely(skb->sk && sock_net(skb->sk)))
-		return sock_net(skb->sk);
-	WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
-		       __func__, __LINE__);
-	if (likely(skb->dev && dev_net(skb->dev)))
-		return dev_net(skb->dev);
-	pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
-		__func__, __LINE__);
-	BUG();
-#else
-	return sock_net(skb->sk);
-#endif
-#else
-	return &init_net;
-#endif
-}
-
 /* This one needed for single_open_net since net is stored directly in
  * private not as a struct i.e. seq_file_net can't be used.
  */
@@ -104,6 +54,8 @@
 extern int ip_vs_conn_tab_size;
 
 struct ip_vs_iphdr {
+	int hdr_flags;	/* ipvs flags */
+	__u32 off;	/* Where IPv4 or IPv6 header starts */
 	__u32 len;	/* IPv4 simply where L4 starts
 			 * IPv6 where L4 Transport Header starts */
 	__u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
@@ -120,48 +72,89 @@
 	return skb_header_pointer(skb, offset, len, buffer);
 }
 
-static inline void
-ip_vs_fill_ip4hdr(const void *nh, struct ip_vs_iphdr *iphdr)
-{
-	const struct iphdr *iph = nh;
-
-	iphdr->len	= iph->ihl * 4;
-	iphdr->fragoffs	= 0;
-	iphdr->protocol	= iph->protocol;
-	iphdr->saddr.ip	= iph->saddr;
-	iphdr->daddr.ip	= iph->daddr;
-}
-
 /* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6.
  * IPv6 requires some extra work, as finding proper header position,
  * depend on the IPv6 extension headers.
  */
-static inline void
-ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, struct ip_vs_iphdr *iphdr)
+static inline int
+ip_vs_fill_iph_skb_off(int af, const struct sk_buff *skb, int offset,
+		       int hdr_flags, struct ip_vs_iphdr *iphdr)
 {
+	iphdr->hdr_flags = hdr_flags;
+	iphdr->off = offset;
+
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6) {
-		const struct ipv6hdr *iph =
-			(struct ipv6hdr *)skb_network_header(skb);
+		struct ipv6hdr _iph;
+		const struct ipv6hdr *iph = skb_header_pointer(
+			skb, offset, sizeof(_iph), &_iph);
+		if (!iph)
+			return 0;
+
 		iphdr->saddr.in6 = iph->saddr;
 		iphdr->daddr.in6 = iph->daddr;
 		/* ipv6_find_hdr() updates len, flags */
-		iphdr->len	 = 0;
+		iphdr->len	 = offset;
 		iphdr->flags	 = 0;
 		iphdr->protocol  = ipv6_find_hdr(skb, &iphdr->len, -1,
 						 &iphdr->fragoffs,
 						 &iphdr->flags);
+		if (iphdr->protocol < 0)
+			return 0;
 	} else
 #endif
 	{
-		const struct iphdr *iph =
-			(struct iphdr *)skb_network_header(skb);
-		iphdr->len	= iph->ihl * 4;
+		struct iphdr _iph;
+		const struct iphdr *iph = skb_header_pointer(
+			skb, offset, sizeof(_iph), &_iph);
+		if (!iph)
+			return 0;
+
+		iphdr->len	= offset + iph->ihl * 4;
 		iphdr->fragoffs	= 0;
 		iphdr->protocol	= iph->protocol;
 		iphdr->saddr.ip	= iph->saddr;
 		iphdr->daddr.ip	= iph->daddr;
 	}
+
+	return 1;
+}
+
+static inline int
+ip_vs_fill_iph_skb_icmp(int af, const struct sk_buff *skb, int offset,
+			bool inverse, struct ip_vs_iphdr *iphdr)
+{
+	int hdr_flags = IP_VS_HDR_ICMP;
+
+	if (inverse)
+		hdr_flags |= IP_VS_HDR_INVERSE;
+
+	return ip_vs_fill_iph_skb_off(af, skb, offset, hdr_flags, iphdr);
+}
+
+static inline int
+ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, bool inverse,
+		   struct ip_vs_iphdr *iphdr)
+{
+	int hdr_flags = 0;
+
+	if (inverse)
+		hdr_flags |= IP_VS_HDR_INVERSE;
+
+	return ip_vs_fill_iph_skb_off(af, skb, skb_network_offset(skb),
+				      hdr_flags, iphdr);
+}
+
+static inline bool
+ip_vs_iph_inverse(const struct ip_vs_iphdr *iph)
+{
+	return !!(iph->hdr_flags & IP_VS_HDR_INVERSE);
+}
+
+static inline bool
+ip_vs_iph_icmp(const struct ip_vs_iphdr *iph)
+{
+	return !!(iph->hdr_flags & IP_VS_HDR_ICMP);
 }
 
 static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
@@ -437,26 +430,27 @@
 
 	void (*exit)(struct ip_vs_protocol *pp);
 
-	int (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
+	int (*init_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);
 
-	void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);
+	void (*exit_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);
 
-	int (*conn_schedule)(int af, struct sk_buff *skb,
+	int (*conn_schedule)(struct netns_ipvs *ipvs,
+			     int af, struct sk_buff *skb,
 			     struct ip_vs_proto_data *pd,
 			     int *verdict, struct ip_vs_conn **cpp,
 			     struct ip_vs_iphdr *iph);
 
 	struct ip_vs_conn *
-	(*conn_in_get)(int af,
+	(*conn_in_get)(struct netns_ipvs *ipvs,
+		       int af,
 		       const struct sk_buff *skb,
-		       const struct ip_vs_iphdr *iph,
-		       int inverse);
+		       const struct ip_vs_iphdr *iph);
 
 	struct ip_vs_conn *
-	(*conn_out_get)(int af,
+	(*conn_out_get)(struct netns_ipvs *ipvs,
+			int af,
 			const struct sk_buff *skb,
-			const struct ip_vs_iphdr *iph,
-			int inverse);
+			const struct ip_vs_iphdr *iph);
 
 	int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
 			    struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
@@ -473,9 +467,9 @@
 				 const struct sk_buff *skb,
 				 struct ip_vs_proto_data *pd);
 
-	int (*register_app)(struct net *net, struct ip_vs_app *inc);
+	int (*register_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);
 
-	void (*unregister_app)(struct net *net, struct ip_vs_app *inc);
+	void (*unregister_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);
 
 	int (*app_conn_bind)(struct ip_vs_conn *cp);
 
@@ -497,11 +491,11 @@
 };
 
 struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
-struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
+struct ip_vs_proto_data *ip_vs_proto_data_get(struct netns_ipvs *ipvs,
 					      unsigned short proto);
 
 struct ip_vs_conn_param {
-	struct net			*net;
+	struct netns_ipvs		*ipvs;
 	const union nf_inet_addr	*caddr;
 	const union nf_inet_addr	*vaddr;
 	__be16				cport;
@@ -528,9 +522,7 @@
 	volatile __u32          flags;          /* status flags */
 	__u16                   protocol;       /* Which protocol (TCP/UDP) */
 	__u16			daf;		/* Address family of the dest */
-#ifdef CONFIG_NET_NS
-	struct net              *net;           /* Name space */
-#endif
+	struct netns_ipvs	*ipvs;
 
 	/* counter and timer */
 	atomic_t		refcnt;		/* reference count */
@@ -577,33 +569,6 @@
 	struct rcu_head		rcu_head;
 };
 
-/* To save some memory in conn table when name space is disabled. */
-static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
-{
-#ifdef CONFIG_NET_NS
-	return cp->net;
-#else
-	return &init_net;
-#endif
-}
-
-static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
-{
-#ifdef CONFIG_NET_NS
-	cp->net = net;
-#endif
-}
-
-static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
-				    struct net *net)
-{
-#ifdef CONFIG_NET_NS
-	return cp->net == net;
-#else
-	return 1;
-#endif
-}
-
 /* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user
  * for IPv6 support.
  *
@@ -663,7 +628,7 @@
 	unsigned int		flags;	  /* service status flags */
 	unsigned int		timeout;  /* persistent timeout in ticks */
 	__be32			netmask;  /* grouping granularity, mask/plen */
-	struct net		*net;
+	struct netns_ipvs	*ipvs;
 
 	struct list_head	destinations;  /* real server d-linked list */
 	__u32			num_dests;     /* number of servers */
@@ -953,6 +918,8 @@
 	int			sysctl_pmtu_disc;
 	int			sysctl_backup_only;
 	int			sysctl_conn_reuse_mode;
+	int			sysctl_schedule_icmp;
+	int			sysctl_ignore_tunneled;
 
 	/* ip_vs_lblc */
 	int			sysctl_lblc_expiration;
@@ -1071,6 +1038,21 @@
 	return ipvs->sysctl_conn_reuse_mode;
 }
 
+static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
+{
+	return ipvs->sysctl_schedule_icmp;
+}
+
+static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
+{
+	return ipvs->sysctl_ignore_tunneled;
+}
+
+static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
+{
+	return ipvs->sysctl_cache_bypass;
+}
+
 #else
 
 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1143,6 +1125,21 @@
 	return 1;
 }
 
+static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
+{
+	return 0;
+}
+
+static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
+{
+	return 0;
+}
+
+static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
+{
+	return 0;
+}
+
 #endif
 
 /* IPVS core functions
@@ -1164,14 +1161,14 @@
 	IP_VS_DIR_LAST,
 };
 
-static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol,
+static inline void ip_vs_conn_fill_param(struct netns_ipvs *ipvs, int af, int protocol,
 					 const union nf_inet_addr *caddr,
 					 __be16 cport,
 					 const union nf_inet_addr *vaddr,
 					 __be16 vport,
 					 struct ip_vs_conn_param *p)
 {
-	p->net = net;
+	p->ipvs = ipvs;
 	p->af = af;
 	p->protocol = protocol;
 	p->caddr = caddr;
@@ -1185,15 +1182,15 @@
 struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
 struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
 
-struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
-					    const struct ip_vs_iphdr *iph,
-					    int inverse);
+struct ip_vs_conn * ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
+					    const struct sk_buff *skb,
+					    const struct ip_vs_iphdr *iph);
 
 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
 
-struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
-					     const struct ip_vs_iphdr *iph,
-					     int inverse);
+struct ip_vs_conn * ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
+					     const struct sk_buff *skb,
+					     const struct ip_vs_iphdr *iph);
 
 /* Get reference to gain full access to conn.
  * By default, RCU read-side critical sections have access only to
@@ -1221,9 +1218,9 @@
 
 const char *ip_vs_state_name(__u16 proto, int state);
 
-void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
+void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
 int ip_vs_check_template(struct ip_vs_conn *ct);
-void ip_vs_random_dropentry(struct net *net);
+void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
 int ip_vs_conn_init(void);
 void ip_vs_conn_cleanup(void);
 
@@ -1288,29 +1285,29 @@
 }
 
 /* IPVS netns init & cleanup functions */
-int ip_vs_estimator_net_init(struct net *net);
-int ip_vs_control_net_init(struct net *net);
-int ip_vs_protocol_net_init(struct net *net);
-int ip_vs_app_net_init(struct net *net);
-int ip_vs_conn_net_init(struct net *net);
-int ip_vs_sync_net_init(struct net *net);
-void ip_vs_conn_net_cleanup(struct net *net);
-void ip_vs_app_net_cleanup(struct net *net);
-void ip_vs_protocol_net_cleanup(struct net *net);
-void ip_vs_control_net_cleanup(struct net *net);
-void ip_vs_estimator_net_cleanup(struct net *net);
-void ip_vs_sync_net_cleanup(struct net *net);
-void ip_vs_service_net_cleanup(struct net *net);
+int ip_vs_estimator_net_init(struct netns_ipvs *ipvs);
+int ip_vs_control_net_init(struct netns_ipvs *ipvs);
+int ip_vs_protocol_net_init(struct netns_ipvs *ipvs);
+int ip_vs_app_net_init(struct netns_ipvs *ipvs);
+int ip_vs_conn_net_init(struct netns_ipvs *ipvs);
+int ip_vs_sync_net_init(struct netns_ipvs *ipvs);
+void ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_app_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_control_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_service_net_cleanup(struct netns_ipvs *ipvs);
 
 /* IPVS application functions
  * (from ip_vs_app.c)
  */
 #define IP_VS_APP_MAX_PORTS  8
-struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app);
-void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
+struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
+void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
 int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
 void ip_vs_unbind_app(struct ip_vs_conn *cp);
-int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+int register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
 			   __u16 port);
 int ip_vs_app_inc_get(struct ip_vs_app *inc);
 void ip_vs_app_inc_put(struct ip_vs_app *inc);
@@ -1375,10 +1372,10 @@
 extern int sysctl_ip_vs_sync_ver;
 
 struct ip_vs_service *
-ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol,
 		  const union nf_inet_addr *vaddr, __be16 vport);
 
-bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
+bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
 			    const union nf_inet_addr *daddr, __be16 dport);
 
 int ip_vs_use_count_inc(void);
@@ -1388,7 +1385,7 @@
 int ip_vs_control_init(void);
 void ip_vs_control_cleanup(void);
 struct ip_vs_dest *
-ip_vs_find_dest(struct net *net, int svc_af, int dest_af,
+ip_vs_find_dest(struct netns_ipvs *ipvs, int svc_af, int dest_af,
 		const union nf_inet_addr *daddr, __be16 dport,
 		const union nf_inet_addr *vaddr, __be16 vport,
 		__u16 protocol, __u32 fwmark, __u32 flags);
@@ -1414,14 +1411,14 @@
 /* IPVS sync daemon data and function prototypes
  * (from ip_vs_sync.c)
  */
-int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *cfg,
+int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *cfg,
 		      int state);
-int stop_sync_thread(struct net *net, int state);
-void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
+int stop_sync_thread(struct netns_ipvs *ipvs, int state);
+void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts);
 
 /* IPVS rate estimator prototypes (from ip_vs_est.c) */
-void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
-void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
+void ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
+void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
 void ip_vs_zero_estimator(struct ip_vs_stats *stats);
 void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats);
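
Unlike the old void fillers, ip_vs_fill_iph_skb() and friends now return 0 when skb_header_pointer() cannot pull the header, so every caller has to bail out on failure. A rough sketch of the calling pattern; the hook name and the NF_ACCEPT fallback are assumptions, not taken from this patch.

/* Sketch: callers must now check the filler's return value. */
static unsigned int example_ipvs_in(int af, struct sk_buff *skb)
{
	struct ip_vs_iphdr iph;

	if (!ip_vs_fill_iph_skb(af, skb, false, &iph))
		return NF_ACCEPT;	/* truncated header, leave the packet alone */

	/* ... continue scheduling using iph.protocol / iph.saddr / iph.daddr ... */
	return NF_ACCEPT;
}
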
 
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 384a93c..e1a10b0 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -812,7 +812,7 @@
 /*
  *	upper-layer output functions
  */
-int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	     struct ipv6_txoptions *opt, int tclass);
 
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
@@ -849,7 +849,7 @@
 
 int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
 		   struct flowi6 *fl6);
-struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
 				      const struct in6_addr *final_dst);
 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 					 const struct in6_addr *final_dst);
@@ -860,14 +860,13 @@
  *	skb processing functions
  */
 
-int ip6_output(struct sock *sk, struct sk_buff *skb);
+int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
 int ip6_forward(struct sk_buff *skb);
 int ip6_input(struct sk_buff *skb);
 int ip6_mc_input(struct sk_buff *skb);
 
-int __ip6_local_out(struct sk_buff *skb);
-int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb);
-int ip6_local_out(struct sk_buff *skb);
+int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 
 /*
  *	Extension header (options) processing
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index 0894ced..b867b0c 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -141,14 +141,14 @@
 	  * called is the order of the registration of the iucv handlers
 	  * to the base code.
 	  */
-	int  (*path_pending)(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
+	int  (*path_pending)(struct iucv_path *, u8 *ipvmid, u8 *ipuser);
 	/*
 	 * The path_complete function is called after an iucv interrupt
 	 * type 0x02 has been received for a path that has been established
 	 * for this handler with iucv_path_connect and got accepted by the
 	 * peer with iucv_path_accept.
 	 */
-	void (*path_complete)(struct iucv_path *, u8 ipuser[16]);
+	void (*path_complete)(struct iucv_path *, u8 *ipuser);
 	 /*
 	  * The path_severed function is called after an iucv interrupt
 	  * type 0x03 has been received. The communication peer shutdown
@@ -156,20 +156,20 @@
 	  * remaining messages can be received until an iucv_path_sever
 	  * shuts down the other end of the path as well.
 	  */
-	void (*path_severed)(struct iucv_path *, u8 ipuser[16]);
+	void (*path_severed)(struct iucv_path *, u8 *ipuser);
 	/*
 	 * The path_quiesced function is called after an iucv interrupt
 	 * type 0x04 has been received. The communication peer has quiesced
 	 * the path. Delivery of messages is stopped until iucv_path_resume
 	 * has been called.
 	 */
-	void (*path_quiesced)(struct iucv_path *, u8 ipuser[16]);
+	void (*path_quiesced)(struct iucv_path *, u8 *ipuser);
 	/*
 	 * The path_resumed function is called after an iucv interrupt
 	 * type 0x05 has been received. The communication peer has resumed
 	 * the path.
 	 */
-	void (*path_resumed)(struct iucv_path *, u8 ipuser[16]);
+	void (*path_resumed)(struct iucv_path *, u8 *ipuser);
 	/*
 	 * The message_pending function is called after an iucv interrupt
 	 * type 0x06 or type 0x07 has been received. A new message is
@@ -256,7 +256,7 @@
  * Returns the result of the CP IUCV call.
  */
 int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
-		     u8 userdata[16], void *private);
+		     u8 *userdata, void *private);
 
 /**
  * iucv_path_connect
@@ -274,7 +274,7 @@
  * Returns the result of the CP IUCV call.
  */
 int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
-		      u8 userid[8], u8 system[8], u8 userdata[16],
+		      u8 *userid, u8 *system, u8 *userdata,
 		      void *private);
 
 /**
@@ -287,7 +287,7 @@
  *
  * Returns the result from the CP IUCV call.
  */
-int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]);
+int iucv_path_quiesce(struct iucv_path *path, u8 *userdata);
 
 /**
  * iucv_path_resume:
@@ -299,7 +299,7 @@
  *
  * Returns the result from the CP IUCV call.
  */
-int iucv_path_resume(struct iucv_path *path, u8 userdata[16]);
+int iucv_path_resume(struct iucv_path *path, u8 *userdata);
 
 /**
  * iucv_path_sever
@@ -310,7 +310,7 @@
  *
  * Returns the result from the CP IUCV call.
  */
-int iucv_path_sever(struct iucv_path *path, u8 userdata[16]);
+int iucv_path_sever(struct iucv_path *path, u8 *userdata);
 
 /**
  * iucv_message_purge
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
new file mode 100644
index 0000000..44a19a1
--- /dev/null
+++ b/include/net/l3mdev.h
@@ -0,0 +1,176 @@
+/*
+ * include/net/l3mdev.h - L3 master device API
+ * Copyright (c) 2015 Cumulus Networks
+ * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _NET_L3MDEV_H_
+#define _NET_L3MDEV_H_
+
+/**
+ * struct l3mdev_ops - l3mdev operations
+ *
+ * @l3mdev_fib_table: Get FIB table id to use for lookups
+ *
+ * @l3mdev_get_rtable: Get cached IPv4 rtable (dst_entry) for device
+ *
+ * @l3mdev_get_saddr: Get source address for a flow
+ */
+
+struct l3mdev_ops {
+	u32		(*l3mdev_fib_table)(const struct net_device *dev);
+	struct rtable *	(*l3mdev_get_rtable)(const struct net_device *dev,
+					     const struct flowi4 *fl4);
+	void		(*l3mdev_get_saddr)(struct net_device *dev,
+					    struct flowi4 *fl4);
+};
+
+#ifdef CONFIG_NET_L3_MASTER_DEV
+
+int l3mdev_master_ifindex_rcu(struct net_device *dev);
+static inline int l3mdev_master_ifindex(struct net_device *dev)
+{
+	int ifindex;
+
+	rcu_read_lock();
+	ifindex = l3mdev_master_ifindex_rcu(dev);
+	rcu_read_unlock();
+
+	return ifindex;
+}
+
+/* get index of an interface to use for FIB lookups. For devices
+ * enslaved to an L3 master device FIB lookups are based on the
+ * master index
+ */
+static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
+{
+	return l3mdev_master_ifindex_rcu(dev) ? : dev->ifindex;
+}
+
+static inline int l3mdev_fib_oif(struct net_device *dev)
+{
+	int oif;
+
+	rcu_read_lock();
+	oif = l3mdev_fib_oif_rcu(dev);
+	rcu_read_unlock();
+
+	return oif;
+}
+
+u32 l3mdev_fib_table_rcu(const struct net_device *dev);
+u32 l3mdev_fib_table_by_index(struct net *net, int ifindex);
+static inline u32 l3mdev_fib_table(const struct net_device *dev)
+{
+	u32 tb_id;
+
+	rcu_read_lock();
+	tb_id = l3mdev_fib_table_rcu(dev);
+	rcu_read_unlock();
+
+	return tb_id;
+}
+
+static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
+					       const struct flowi4 *fl4)
+{
+	if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rtable)
+		return dev->l3mdev_ops->l3mdev_get_rtable(dev, fl4);
+
+	return NULL;
+}
+
+static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
+{
+	struct net_device *dev;
+	bool rc = false;
+
+	if (ifindex == 0)
+		return false;
+
+	rcu_read_lock();
+
+	dev = dev_get_by_index_rcu(net, ifindex);
+	if (dev)
+		rc = netif_is_l3_master(dev);
+
+	rcu_read_unlock();
+
+	return rc;
+}
+
+static inline void l3mdev_get_saddr(struct net *net, int ifindex,
+				    struct flowi4 *fl4)
+{
+	struct net_device *dev;
+
+	if (ifindex) {
+
+		rcu_read_lock();
+
+		dev = dev_get_by_index_rcu(net, ifindex);
+		if (dev && netif_is_l3_master(dev) &&
+		    dev->l3mdev_ops->l3mdev_get_saddr) {
+			dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4);
+		}
+
+		rcu_read_unlock();
+	}
+}
+
+#else
+
+static inline int l3mdev_master_ifindex_rcu(struct net_device *dev)
+{
+	return 0;
+}
+static inline int l3mdev_master_ifindex(struct net_device *dev)
+{
+	return 0;
+}
+
+static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
+{
+	return dev ? dev->ifindex : 0;
+}
+static inline int l3mdev_fib_oif(struct net_device *dev)
+{
+	return dev ? dev->ifindex : 0;
+}
+
+static inline u32 l3mdev_fib_table_rcu(const struct net_device *dev)
+{
+	return 0;
+}
+static inline u32 l3mdev_fib_table(const struct net_device *dev)
+{
+	return 0;
+}
+static inline u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
+{
+	return 0;
+}
+
+static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
+					       const struct flowi4 *fl4)
+{
+	return NULL;
+}
+
+static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
+{
+	return false;
+}
+
+static inline void l3mdev_get_saddr(struct net *net, int ifindex,
+				    struct flowi4 *fl4)
+{
+}
+#endif
+
+#endif /* _NET_L3MDEV_H_ */
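
The helpers above only fire when netif_is_l3_master() is true and the device provides l3mdev_ops. Below is a sketch of the driver side for a hypothetical master device; the private struct, table id and the setup snippet in the trailing comment are assumptions, not part of this header.

/* Hypothetical L3 master driver: route all lookups through one table. */
struct example_l3mdev_priv {
	u32 tb_id;			/* FIB table owned by this domain */
};

static u32 example_l3mdev_fib_table(const struct net_device *dev)
{
	struct example_l3mdev_priv *priv = netdev_priv(dev);

	return priv->tb_id;
}

static const struct l3mdev_ops example_l3mdev_ops = {
	.l3mdev_fib_table = example_l3mdev_fib_table,
};

/* In the driver's setup path one would then do roughly:
 *	dev->priv_flags |= IFF_L3MDEV_MASTER;
 *	dev->l3mdev_ops  = &example_l3mdev_ops;
 * after which l3mdev_fib_table(dev) above resolves to priv->tb_id.
 */
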
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index fce0e35..66350ce 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -18,7 +18,7 @@
 	__u16		type;
 	__u16		flags;
 	atomic_t	refcnt;
-	int		(*orig_output)(struct sock *sk, struct sk_buff *skb);
+	int		(*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
 	int		(*orig_input)(struct sk_buff *);
 	int             len;
 	__u8            data[0];
@@ -28,7 +28,7 @@
 	int (*build_state)(struct net_device *dev, struct nlattr *encap,
 			   unsigned int family, const void *cfg,
 			   struct lwtunnel_state **ts);
-	int (*output)(struct sock *sk, struct sk_buff *skb);
+	int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
 	int (*input)(struct sk_buff *skb);
 	int (*fill_encap)(struct sk_buff *skb,
 			  struct lwtunnel_state *lwtstate);
@@ -88,7 +88,7 @@
 int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
 struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
 int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
-int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
+int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb);
 int lwtunnel_input(struct sk_buff *skb);
 
 #else
@@ -160,7 +160,7 @@
 	return 0;
 }
 
-static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+static inline int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	return -EOPNOTSUPP;
 }
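
Both the cached orig_output pointer and the encap provider's output hook now carry the namespace explicitly, so providers no longer derive it from the socket or the skb. A minimal sketch of a provider callback under the new signature; the callback name and the hand-off to orig_output are illustrative assumptions.

/* Sketch: encap output hook with the (net, sk, skb) signature. */
static int example_encap_output(struct net *net, struct sock *sk,
				struct sk_buff *skb)
{
	struct lwtunnel_state *lwtstate = skb_dst(skb)->lwtstate;

	/* ... prepend the encapsulation header to skb here ... */

	/* hand the packet back to the route's original output path,
	 * which now takes the same three arguments */
	return lwtstate->orig_output(net, sk, skb);
}

static const struct lwtunnel_encap_ops example_encap_ops = {
	.output	= example_encap_output,
};
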
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index bfc5694..4ec6fed 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -5,6 +5,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright (C) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1360,6 +1361,8 @@
  * @debugfs_dir: debugfs dentry, can be used by drivers to create own per
  *	interface debug files. Note that it will be NULL for the virtual
  *	monitor interface (if that is requested.)
+ * @probe_req_reg: probe requests should be reported to mac80211 for this
+ *	interface.
  * @drv_priv: data area for driver use, will always be aligned to
  *	sizeof(void *).
  * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
@@ -1384,6 +1387,8 @@
 	struct dentry *debugfs_dir;
 #endif
 
+	unsigned int probe_req_reg;
+
 	/* must be last */
 	u8 drv_priv[0] __aligned(sizeof(void *));
 };
@@ -1494,10 +1499,8 @@
  * 	- Temporal Authenticator Rx MIC Key (64 bits)
  * @icv_len: The ICV length for this key type
  * @iv_len: The IV length for this key type
- * @drv_priv: pointer for driver use
  */
 struct ieee80211_key_conf {
-	void *drv_priv;
 	atomic64_t tx_pn;
 	u32 cipher;
 	u8 icv_len;
@@ -1894,6 +1897,12 @@
  * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth
  *	than the BSS bandwidth for a TDLS link on the base channel.
  *
+ * @IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU: The driver supports receiving A-MSDUs
+ *	within A-MPDU.
+ *
+ * @IEEE80211_HW_BEACON_TX_STATUS: The device/driver provides TX status
+ *	for sent beacons.
+ *
  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
 enum ieee80211_hw_flags {
@@ -1927,6 +1936,8 @@
 	IEEE80211_HW_SUPPORTS_CLONED_SKBS,
 	IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
 	IEEE80211_HW_TDLS_WIDER_BW,
+	IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU,
+	IEEE80211_HW_BEACON_TX_STATUS,
 
 	/* keep last, obviously */
 	NUM_IEEE80211_HW_FLAGS
@@ -2827,6 +2838,13 @@
  *	See the section "Frame filtering" for more information.
  *	This callback must be implemented and can sleep.
  *
+ * @config_iface_filter: Configure the interface's RX filter.
+ *	This callback is optional and is used to configure which frames
+ *	should be passed to mac80211. The filter_flags is the combination
+ *	of FIF_* flags. The changed_flags is a bit mask that indicates
+ *	which flags are changed.
+ *	This callback can sleep.
+ *
  * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit
  * 	must be set or cleared for a given STA. Must be atomic.
  *
@@ -3016,6 +3034,9 @@
  *	buffer size of 8. Correct ways to retransmit #1 would be:
  *	 - TX:       1 or 18 or 81
  *	Even "189" would be wrong since 1 could be lost again.
+ *	The @amsdu parameter is valid when the action is set to
+ *	%IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's ability
+ *	to receive A-MSDU within A-MPDU.
  *
  *	Returns a negative error code on failure.
  *	The callback can sleep.
@@ -3266,6 +3287,10 @@
 				 unsigned int changed_flags,
 				 unsigned int *total_flags,
 				 u64 multicast);
+	void (*config_iface_filter)(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    unsigned int filter_flags,
+				    unsigned int changed_flags);
 	int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
 		       bool set);
 	int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -3349,7 +3374,7 @@
 			    struct ieee80211_vif *vif,
 			    enum ieee80211_ampdu_mlme_action action,
 			    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			    u8 buf_size);
+			    u8 buf_size, bool amsdu);
 	int (*get_survey)(struct ieee80211_hw *hw, int idx,
 		struct survey_info *survey);
 	void (*rfkill_poll)(struct ieee80211_hw *hw);
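
For a driver, the new amsdu flag on ampdu_action() only carries information on the IEEE80211_AMPDU_TX_OPERATIONAL transition, where it reports whether the peer accepts A-MSDUs inside the aggregate. A sketch of a callback that just records it; the per-station private struct is an assumption.

/* Hypothetical per-station driver state kept in sta->drv_priv. */
struct example_sta_priv {
	bool tx_amsdu_in_ampdu;
	u8 agg_buf_size;
};

static int example_ampdu_action(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				enum ieee80211_ampdu_mlme_action action,
				struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				u8 buf_size, bool amsdu)
{
	struct example_sta_priv *sp = (void *)sta->drv_priv;

	switch (action) {
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* only here do buf_size and amsdu mean anything */
		sp->agg_buf_size = buf_size;
		sp->tx_amsdu_in_ampdu = amsdu;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
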
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index b7f9961..5718765 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -23,14 +23,6 @@
 
 #include <net/cfg802154.h>
 
-/* General MAC frame format:
- *  2 bytes: Frame Control
- *  1 byte:  Sequence Number
- * 20 bytes: Addressing fields
- * 14 bytes: Auxiliary Security Header
- */
-#define MAC802154_FRAME_HARD_HEADER_LEN		(2 + 1 + 20 + 14)
-
 /**
  * enum ieee802154_hw_addr_filt_flags - hardware address filtering flags
  *
@@ -250,6 +242,21 @@
 };
 
 /**
+ * ieee802154_get_fc_from_skb - get the frame control field from an skb
+ * @skb: skb from which the frame control field is taken
+ */
+static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
+{
+	/* return some invalid fc on failure */
+	if (unlikely(skb->len < 2)) {
+		WARN_ON(1);
+		return cpu_to_le16(0);
+	}
+
+	return (__force __le16)__get_unaligned_memmove16(skb_mac_header(skb));
+}
+
+/**
  * ieee802154_be64_to_le64 - copies and convert be64 to le64
  * @le64_dst: le64 destination pointer
  * @be64_src: be64 source pointer
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index aba5695..bf39374 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -180,15 +180,13 @@
 
 int ndisc_rcv(struct sk_buff *skb);
 
-void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
-		   const struct in6_addr *solicit,
+void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
 		   const struct in6_addr *daddr, const struct in6_addr *saddr,
 		   struct sk_buff *oskb);
 
 void ndisc_send_rs(struct net_device *dev,
 		   const struct in6_addr *saddr, const struct in6_addr *daddr);
-void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
-		   const struct in6_addr *daddr,
+void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
 		   const struct in6_addr *solicited_addr,
 		   bool router, bool solicited, bool override, bool inc_opt);
 
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index 8fe2665..e8d1448 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -45,12 +45,12 @@
 void br_netfilter_enable(void);
 
 #if IS_ENABLED(CONFIG_IPV6)
-int br_validate_ipv6(struct sk_buff *skb);
-unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
+int br_validate_ipv6(struct net *net, struct sk_buff *skb);
+unsigned int br_nf_pre_routing_ipv6(void *priv,
 				    struct sk_buff *skb,
 				    const struct nf_hook_state *state);
 #else
-static inline int br_validate_ipv6(struct sk_buff *skb)
+static inline int br_validate_ipv6(struct net *net, struct sk_buff *skb)
 {
 	return -1;
 }
diff --git a/include/net/netfilter/ipv4/nf_dup_ipv4.h b/include/net/netfilter/ipv4/nf_dup_ipv4.h
index 42008f1..0a14733 100644
--- a/include/net/netfilter/ipv4/nf_dup_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_dup_ipv4.h
@@ -1,7 +1,7 @@
 #ifndef _NF_DUP_IPV4_H_
 #define _NF_DUP_IPV4_H_
 
-void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
 		 const struct in_addr *gw, int oif);
 
 #endif /* _NF_DUP_IPV4_H_ */
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 77862c3..df7ecd8 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -6,7 +6,7 @@
 #include <net/icmp.h>
 
 void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
-void nf_send_reset(struct sk_buff *oldskb, int hook);
+void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook);
 
 const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
 					     struct tcphdr *_oth, int hook);
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
index 27666d8..fb7da5b 100644
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -5,7 +5,7 @@
 
 int nf_ct_frag6_init(void);
 void nf_ct_frag6_cleanup(void);
-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user);
 void nf_ct_frag6_consume_orig(struct sk_buff *skb);
 
 struct inet_frags_ctl;
diff --git a/include/net/netfilter/ipv6/nf_dup_ipv6.h b/include/net/netfilter/ipv6/nf_dup_ipv6.h
index ed6bd66..fa6237b 100644
--- a/include/net/netfilter/ipv6/nf_dup_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_dup_ipv6.h
@@ -1,7 +1,7 @@
 #ifndef _NF_DUP_IPV6_H_
 #define _NF_DUP_IPV6_H_
 
-void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
 		 const struct in6_addr *gw, int oif);
 
 #endif /* _NF_DUP_IPV6_H_ */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index e8ad468..d642f68 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -191,7 +191,8 @@
 bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
 
 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
-		       u_int16_t l3num, struct nf_conntrack_tuple *tuple);
+		       u_int16_t l3num, struct net *net,
+		       struct nf_conntrack_tuple *tuple);
 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
 			  const struct nf_conntrack_tuple *orig);
 
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index c03f9c4..788ef58 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -41,6 +41,7 @@
 
 bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff,
 		     unsigned int dataoff, u_int16_t l3num, u_int8_t protonum,
+		     struct net *net,
 		     struct nf_conntrack_tuple *tuple,
 		     const struct nf_conntrack_l3proto *l3proto,
 		     const struct nf_conntrack_l4proto *l4proto);
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 1f70613..956d8a6 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -26,7 +26,7 @@
 	/* Try to fill in the third arg: dataoff is offset past network protocol
            hdr.  Return true if possible. */
 	bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
-			     struct nf_conntrack_tuple *tuple);
+			     struct net *net, struct nf_conntrack_tuple *tuple);
 
 	/* Invert the per-proto part of the tuple: ie. turn xmit into reply.
 	 * Some packets can't be inverted: return 0 in that case.
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index fbfd1ba..186c541 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -10,7 +10,7 @@
 unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
 			   unsigned int hooknum, struct sk_buff *skb);
 
-int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family);
+int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family);
 
 static inline int nf_nat_initialized(struct nf_conn *ct,
 				     enum nf_nat_manip_type manip)
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index a312732..aef3e5f 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -43,31 +43,31 @@
 				  enum ip_conntrack_info ctinfo,
 				  unsigned int hooknum);
 
-unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+unsigned int nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
 			    const struct nf_hook_state *state,
-			    unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+			    unsigned int (*do_chain)(void *priv,
 						     struct sk_buff *skb,
 						     const struct nf_hook_state *state,
 						     struct nf_conn *ct));
 
-unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+unsigned int nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
 			     const struct nf_hook_state *state,
-			     unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+			     unsigned int (*do_chain)(void *priv,
 						      struct sk_buff *skb,
 						      const struct nf_hook_state *state,
 						      struct nf_conn *ct));
 
-unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
+unsigned int nf_nat_ipv4_local_fn(void *priv,
 				  struct sk_buff *skb,
 				  const struct nf_hook_state *state,
-				  unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+				  unsigned int (*do_chain)(void *priv,
 							   struct sk_buff *skb,
 							   const struct nf_hook_state *state,
 							   struct nf_conn *ct));
 
-unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+unsigned int nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
 			    const struct nf_hook_state *state,
-			    unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+			    unsigned int (*do_chain)(void *priv,
 						     struct sk_buff *skb,
 						     const struct nf_hook_state *state,
 						     struct nf_conn *ct));
@@ -76,31 +76,31 @@
 				    enum ip_conntrack_info ctinfo,
 				    unsigned int hooknum, unsigned int hdrlen);
 
-unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+unsigned int nf_nat_ipv6_in(void *priv, struct sk_buff *skb,
 			    const struct nf_hook_state *state,
-			    unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+			    unsigned int (*do_chain)(void *priv,
 						     struct sk_buff *skb,
 						     const struct nf_hook_state *state,
 						     struct nf_conn *ct));
 
-unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+unsigned int nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
 			     const struct nf_hook_state *state,
-			     unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+			     unsigned int (*do_chain)(void *priv,
 						      struct sk_buff *skb,
 						      const struct nf_hook_state *state,
 						      struct nf_conn *ct));
 
-unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
+unsigned int nf_nat_ipv6_local_fn(void *priv,
 				  struct sk_buff *skb,
 				  const struct nf_hook_state *state,
-				  unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+				  unsigned int (*do_chain)(void *priv,
 							   struct sk_buff *skb,
 							   const struct nf_hook_state *state,
 							   struct nf_conn *ct));
 
-unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+unsigned int nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
 			    const struct nf_hook_state *state,
-			    unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+			    unsigned int (*do_chain)(void *priv,
 						     struct sk_buff *skb,
 						     const struct nf_hook_state *state,
 						     struct nf_conn *ct));
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index aa8bee7..c9149cc 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -14,9 +14,11 @@
 
 struct nft_pktinfo {
 	struct sk_buff			*skb;
+	struct net			*net;
 	const struct net_device		*in;
 	const struct net_device		*out;
-	const struct nf_hook_ops	*ops;
+	u8				pf;
+	u8				hook;
 	u8				nhoff;
 	u8				thoff;
 	u8				tprot;
@@ -25,16 +27,15 @@
 };
 
 static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
-				   const struct nf_hook_ops *ops,
 				   struct sk_buff *skb,
 				   const struct nf_hook_state *state)
 {
 	pkt->skb = skb;
+	pkt->net = pkt->xt.net = state->net;
 	pkt->in = pkt->xt.in = state->in;
 	pkt->out = pkt->xt.out = state->out;
-	pkt->ops = ops;
-	pkt->xt.hooknum = ops->hooknum;
-	pkt->xt.family = ops->pf;
+	pkt->hook = pkt->xt.hooknum = state->hook;
+	pkt->pf = pkt->xt.family = state->pf;
 }
 
 /**
@@ -815,8 +816,7 @@
 void nft_unregister_basechain(struct nft_base_chain *basechain,
 			      unsigned int hook_nops);
 
-unsigned int nft_do_chain(struct nft_pktinfo *pkt,
-			  const struct nf_hook_ops *ops);
+unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
 
 /**
  *	struct nft_table - nf_tables table
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index 2df7f96..ca6ef6b 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -6,13 +6,12 @@
 
 static inline void
 nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
-		     const struct nf_hook_ops *ops,
 		     struct sk_buff *skb,
 		     const struct nf_hook_state *state)
 {
 	struct iphdr *ip;
 
-	nft_set_pktinfo(pkt, ops, skb, state);
+	nft_set_pktinfo(pkt, skb, state);
 
 	ip = ip_hdr(pkt->skb);
 	pkt->tprot = ip->protocol;
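
With the nf_hook_ops pointer gone, a base-chain entry point needs only the opaque priv (the chain) and the hook state, which nft_set_pktinfo() copies into the pktinfo. The per-family hook therefore reduces to something like this sketch (the function name is illustrative):

/* Sketch: IPv4 base-chain hook using the reworked signatures. */
static unsigned int example_nft_ipv4_hook(void *priv, struct sk_buff *skb,
					  const struct nf_hook_state *state)
{
	struct nft_pktinfo pkt;

	/* net/hook/pf now come from state rather than from ops */
	nft_set_pktinfo_ipv4(&pkt, skb, state);

	/* priv identifies the chain and is resolved inside nft_do_chain() */
	return nft_do_chain(&pkt, priv);
}
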
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 97db2e3..8ad39a6 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -6,14 +6,13 @@
 
 static inline int
 nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
-		     const struct nf_hook_ops *ops,
 		     struct sk_buff *skb,
 		     const struct nf_hook_state *state)
 {
 	int protohdr, thoff = 0;
 	unsigned short frag_off;
 
-	nft_set_pktinfo(pkt, ops, skb, state);
+	nft_set_pktinfo(pkt, skb, state);
 
 	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
 	/* If malformed, drop it */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 2a5dbcc..0e31727 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -1004,6 +1004,15 @@
 }
 
 /**
+ * nla_get_le32 - return payload of __le32 attribute
+ * @nla: __le32 netlink attribute
+ */
+static inline __le32 nla_get_le32(const struct nlattr *nla)
+{
+	return *(__le32 *) nla_data(nla);
+}
+
+/**
  * nla_get_u16 - return payload of u16 attribute
  * @nla: u16 netlink attribute
  */
@@ -1066,6 +1075,15 @@
 }
 
 /**
+ * nla_get_le64 - return payload of __le64 attribute
+ * @nla: __le64 netlink attribute
+ */
+static inline __le64 nla_get_le64(const struct nlattr *nla)
+{
+	return *(__le64 *) nla_data(nla);
+}
+
+/**
  * nla_get_s32 - return payload of s32 attribute
  * @nla: s32 netlink attribute
  */
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index c68926b..46d336a 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -74,6 +74,7 @@
 	int sysctl_icmp_ratelimit;
 	int sysctl_icmp_ratemask;
 	int sysctl_icmp_errors_use_inbound_ifaddr;
+	int sysctl_icmp_redirects_use_orig_daddr;
 
 	struct local_ports ip_local_ports;
 
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index cf2713d..32cb3e5 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -56,6 +56,22 @@
 
 	/* add new commands above here */
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	NL802154_CMD_SET_SEC_PARAMS,
+	NL802154_CMD_GET_SEC_KEY,		/* can dump */
+	NL802154_CMD_NEW_SEC_KEY,
+	NL802154_CMD_DEL_SEC_KEY,
+	NL802154_CMD_GET_SEC_DEV,		/* can dump */
+	NL802154_CMD_NEW_SEC_DEV,
+	NL802154_CMD_DEL_SEC_DEV,
+	NL802154_CMD_GET_SEC_DEVKEY,		/* can dump */
+	NL802154_CMD_NEW_SEC_DEVKEY,
+	NL802154_CMD_DEL_SEC_DEVKEY,
+	NL802154_CMD_GET_SEC_LEVEL,		/* can dump */
+	NL802154_CMD_NEW_SEC_LEVEL,
+	NL802154_CMD_DEL_SEC_LEVEL,
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 	/* used to define NL802154_CMD_MAX below */
 	__NL802154_CMD_AFTER_LAST,
 	NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1
@@ -110,6 +126,18 @@
 
 	/* add attributes here, update the policy in nl802154.c */
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	NL802154_ATTR_SEC_ENABLED,
+	NL802154_ATTR_SEC_OUT_LEVEL,
+	NL802154_ATTR_SEC_OUT_KEY_ID,
+	NL802154_ATTR_SEC_FRAME_COUNTER,
+
+	NL802154_ATTR_SEC_LEVEL,
+	NL802154_ATTR_SEC_DEVICE,
+	NL802154_ATTR_SEC_DEVKEY,
+	NL802154_ATTR_SEC_KEY,
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 	__NL802154_ATTR_AFTER_LAST,
 	NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1
 };
@@ -247,4 +275,167 @@
 	NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
 };
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+
+enum nl802154_dev_addr_modes {
+	NL802154_DEV_ADDR_NONE,
+	__NL802154_DEV_ADDR_INVALID,
+	NL802154_DEV_ADDR_SHORT,
+	NL802154_DEV_ADDR_EXTENDED,
+
+	/* keep last */
+	__NL802154_DEV_ADDR_AFTER_LAST,
+	NL802154_DEV_ADDR_MAX = __NL802154_DEV_ADDR_AFTER_LAST - 1
+};
+
+enum nl802154_dev_addr_attrs {
+	NL802154_DEV_ADDR_ATTR_UNSPEC,
+
+	NL802154_DEV_ADDR_ATTR_PAN_ID,
+	NL802154_DEV_ADDR_ATTR_MODE,
+	NL802154_DEV_ADDR_ATTR_SHORT,
+	NL802154_DEV_ADDR_ATTR_EXTENDED,
+
+	/* keep last */
+	__NL802154_DEV_ADDR_ATTR_AFTER_LAST,
+	NL802154_DEV_ADDR_ATTR_MAX = __NL802154_DEV_ADDR_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_key_id_modes {
+	NL802154_KEY_ID_MODE_IMPLICIT,
+	NL802154_KEY_ID_MODE_INDEX,
+	NL802154_KEY_ID_MODE_INDEX_SHORT,
+	NL802154_KEY_ID_MODE_INDEX_EXTENDED,
+
+	/* keep last */
+	__NL802154_KEY_ID_MODE_AFTER_LAST,
+	NL802154_KEY_ID_MODE_MAX = __NL802154_KEY_ID_MODE_AFTER_LAST - 1
+};
+
+enum nl802154_key_id_attrs {
+	NL802154_KEY_ID_ATTR_UNSPEC,
+
+	NL802154_KEY_ID_ATTR_MODE,
+	NL802154_KEY_ID_ATTR_INDEX,
+	NL802154_KEY_ID_ATTR_IMPLICIT,
+	NL802154_KEY_ID_ATTR_SOURCE_SHORT,
+	NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
+
+	/* keep last */
+	__NL802154_KEY_ID_ATTR_AFTER_LAST,
+	NL802154_KEY_ID_ATTR_MAX = __NL802154_KEY_ID_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_seclevels {
+	NL802154_SECLEVEL_NONE,
+	NL802154_SECLEVEL_MIC32,
+	NL802154_SECLEVEL_MIC64,
+	NL802154_SECLEVEL_MIC128,
+	NL802154_SECLEVEL_ENC,
+	NL802154_SECLEVEL_ENC_MIC32,
+	NL802154_SECLEVEL_ENC_MIC64,
+	NL802154_SECLEVEL_ENC_MIC128,
+
+	/* keep last */
+	__NL802154_SECLEVEL_AFTER_LAST,
+	NL802154_SECLEVEL_MAX = __NL802154_SECLEVEL_AFTER_LAST - 1
+};
+
+enum nl802154_frames {
+	NL802154_FRAME_BEACON,
+	NL802154_FRAME_DATA,
+	NL802154_FRAME_ACK,
+	NL802154_FRAME_CMD,
+
+	/* keep last */
+	__NL802154_FRAME_AFTER_LAST,
+	NL802154_FRAME_MAX = __NL802154_FRAME_AFTER_LAST - 1
+};
+
+enum nl802154_cmd_frames {
+	__NL802154_CMD_FRAME_INVALID,
+	NL802154_CMD_FRAME_ASSOC_REQUEST,
+	NL802154_CMD_FRAME_ASSOC_RESPONSE,
+	NL802154_CMD_FRAME_DISASSOC_NOTIFY,
+	NL802154_CMD_FRAME_DATA_REQUEST,
+	NL802154_CMD_FRAME_PAN_ID_CONFLICT_NOTIFY,
+	NL802154_CMD_FRAME_ORPHAN_NOTIFY,
+	NL802154_CMD_FRAME_BEACON_REQUEST,
+	NL802154_CMD_FRAME_COORD_REALIGNMENT,
+	NL802154_CMD_FRAME_GTS_REQUEST,
+
+	/* keep last */
+	__NL802154_CMD_FRAME_AFTER_LAST,
+	NL802154_CMD_FRAME_MAX = __NL802154_CMD_FRAME_AFTER_LAST - 1
+};
+
+enum nl802154_seclevel_attrs {
+	NL802154_SECLEVEL_ATTR_UNSPEC,
+
+	NL802154_SECLEVEL_ATTR_LEVELS,
+	NL802154_SECLEVEL_ATTR_FRAME,
+	NL802154_SECLEVEL_ATTR_CMD_FRAME,
+	NL802154_SECLEVEL_ATTR_DEV_OVERRIDE,
+
+	/* keep last */
+	__NL802154_SECLEVEL_ATTR_AFTER_LAST,
+	NL802154_SECLEVEL_ATTR_MAX = __NL802154_SECLEVEL_ATTR_AFTER_LAST - 1
+};
+
+/* TODO what is this? couldn't find in mib */
+enum {
+	NL802154_DEVKEY_IGNORE,
+	NL802154_DEVKEY_RESTRICT,
+	NL802154_DEVKEY_RECORD,
+
+	/* keep last */
+	__NL802154_DEVKEY_AFTER_LAST,
+	NL802154_DEVKEY_MAX = __NL802154_DEVKEY_AFTER_LAST - 1
+};
+
+enum nl802154_dev {
+	NL802154_DEV_ATTR_UNSPEC,
+
+	NL802154_DEV_ATTR_FRAME_COUNTER,
+	NL802154_DEV_ATTR_PAN_ID,
+	NL802154_DEV_ATTR_SHORT_ADDR,
+	NL802154_DEV_ATTR_EXTENDED_ADDR,
+	NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
+	NL802154_DEV_ATTR_KEY_MODE,
+
+	/* keep last */
+	__NL802154_DEV_ATTR_AFTER_LAST,
+	NL802154_DEV_ATTR_MAX = __NL802154_DEV_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_devkey {
+	NL802154_DEVKEY_ATTR_UNSPEC,
+
+	NL802154_DEVKEY_ATTR_FRAME_COUNTER,
+	NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
+	NL802154_DEVKEY_ATTR_ID,
+
+	/* keep last */
+	__NL802154_DEVKEY_ATTR_AFTER_LAST,
+	NL802154_DEVKEY_ATTR_MAX = __NL802154_DEVKEY_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_key {
+	NL802154_KEY_ATTR_UNSPEC,
+
+	NL802154_KEY_ATTR_ID,
+	NL802154_KEY_ATTR_USAGE_FRAMES,
+	NL802154_KEY_ATTR_USAGE_CMDS,
+	NL802154_KEY_ATTR_BYTES,
+
+	/* keep last */
+	__NL802154_KEY_ATTR_AFTER_LAST,
+	NL802154_KEY_ATTR_MAX = __NL802154_KEY_ATTR_AFTER_LAST - 1
+};
+
+#define NL802154_KEY_SIZE		16
+#define NL802154_CMD_FRAME_NR_IDS	256
+
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 #endif /* __NL802154_H */
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 87935ca..2e73748 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -32,17 +32,17 @@
 	int		obj_size;
 	struct kmem_cache	*slab;
 	char		*slab_name;
-	int		(*rtx_syn_ack)(struct sock *sk,
+	int		(*rtx_syn_ack)(const struct sock *sk,
 				       struct request_sock *req);
-	void		(*send_ack)(struct sock *sk, struct sk_buff *skb,
+	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
 				    struct request_sock *req);
-	void		(*send_reset)(struct sock *sk,
+	void		(*send_reset)(const struct sock *sk,
 				      struct sk_buff *skb);
 	void		(*destructor)(struct request_sock *req);
 	void		(*syn_ack_timeout)(const struct request_sock *req);
 };
 
-int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
+int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
 
 /* struct request_sock - mini sock to represent a connection request
  */
@@ -50,16 +50,15 @@
 	struct sock_common		__req_common;
 #define rsk_refcnt			__req_common.skc_refcnt
 #define rsk_hash			__req_common.skc_hash
+#define rsk_listener			__req_common.skc_listener
+#define rsk_window_clamp		__req_common.skc_window_clamp
+#define rsk_rcv_wnd			__req_common.skc_rcv_wnd
 
 	struct request_sock		*dl_next;
-	struct sock			*rsk_listener;
 	u16				mss;
 	u8				num_retrans; /* number of retransmits */
 	u8				cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
 	u8				num_timeout:7; /* number of timeouts */
-	/* The following two fields can be easily recomputed I think -AK */
-	u32				window_clamp; /* window clamp at creation time */
-	u32				rcv_wnd;	  /* rcv_wnd offered first time */
 	u32				ts_recent;
 	struct timer_list		rsk_timer;
 	const struct request_sock_ops	*rsk_ops;
@@ -69,24 +68,6 @@
 	u32				peer_secid;
 };
 
-static inline struct request_sock *
-reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
-{
-	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
-
-	if (req) {
-		req->rsk_ops = ops;
-		sock_hold(sk_listener);
-		req->rsk_listener = sk_listener;
-		req->saved_syn = NULL;
-		/* Following is temporary. It is coupled with debugging
-		 * helpers in reqsk_put() & reqsk_free()
-		 */
-		atomic_set(&req->rsk_refcnt, 0);
-	}
-	return req;
-}
-
 static inline struct request_sock *inet_reqsk(struct sock *sk)
 {
 	return (struct request_sock *)sk;
@@ -97,6 +78,34 @@
 	return (struct sock *)req;
 }
 
+static inline struct request_sock *
+reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
+	    bool attach_listener)
+{
+	struct request_sock *req;
+
+	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
+
+	if (req) {
+		req->rsk_ops = ops;
+		if (attach_listener) {
+			sock_hold(sk_listener);
+			req->rsk_listener = sk_listener;
+		} else {
+			req->rsk_listener = NULL;
+		}
+		req_to_sk(req)->sk_prot = sk_listener->sk_prot;
+		sk_node_init(&req_to_sk(req)->sk_node);
+		sk_tx_queue_clear(req_to_sk(req));
+		req->saved_syn = NULL;
+		/* Following is temporary. It is coupled with debugging
+		 * helpers in reqsk_put() & reqsk_free()
+		 */
+		atomic_set(&req->rsk_refcnt, 0);
+	}
+	return req;
+}
+
 static inline void reqsk_free(struct request_sock *req)
 {
 	/* temporary debugging */
@@ -117,26 +126,6 @@
 
 extern int sysctl_max_syn_backlog;
 
-/** struct listen_sock - listen state
- *
- * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
- */
-struct listen_sock {
-	int			qlen_inc; /* protected by listener lock */
-	int			young_inc;/* protected by listener lock */
-
-	/* following fields can be updated by timer */
-	atomic_t		qlen_dec; /* qlen = qlen_inc - qlen_dec */
-	atomic_t		young_dec;
-
-	u8			max_qlen_log ____cacheline_aligned_in_smp;
-	u8			synflood_warned;
-	/* 2 bytes hole, try to use */
-	u32			hash_rnd;
-	u32			nr_table_entries;
-	struct request_sock	*syn_table[0];
-};
-
 /*
  * For a TCP Fast Open listener -
  *	lock - protects the access to all the reqsk, which is co-owned by
@@ -170,46 +159,29 @@
  * @rskq_accept_head - FIFO head of established children
  * @rskq_accept_tail - FIFO tail of established children
  * @rskq_defer_accept - User waits for some data after accept()
- * @syn_wait_lock - serializer
- *
- * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
- * lock sock while browsing the listening hash (otherwise it's deadlock prone).
  *
  */
 struct request_sock_queue {
+	spinlock_t		rskq_lock;
+	u8			rskq_defer_accept;
+
+	u32			synflood_warned;
+	atomic_t		qlen;
+	atomic_t		young;
+
 	struct request_sock	*rskq_accept_head;
 	struct request_sock	*rskq_accept_tail;
-	u8			rskq_defer_accept;
-	struct listen_sock	*listen_opt;
-	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
-					     * enabled on this listener. Check
-					     * max_qlen != 0 in fastopen_queue
-					     * to determine if TFO is enabled
-					     * right at this moment.
+	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
+					     * if TFO is enabled.
 					     */
-
-	/* temporary alignment, our goal is to get rid of this lock */
-	spinlock_t		syn_wait_lock ____cacheline_aligned_in_smp;
 };
 
-int reqsk_queue_alloc(struct request_sock_queue *queue,
-		      unsigned int nr_table_entries);
+void reqsk_queue_alloc(struct request_sock_queue *queue);
 
-void __reqsk_queue_destroy(struct request_sock_queue *queue);
-void reqsk_queue_destroy(struct request_sock_queue *queue);
 void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 			   bool reset);
 
-static inline struct request_sock *
-	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
-{
-	struct request_sock *req = queue->rskq_accept_head;
-
-	queue->rskq_accept_head = NULL;
-	return req;
-}
-
-static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
 {
 	return queue->rskq_accept_head == NULL;
 }
@@ -219,6 +191,7 @@
 				   struct sock *parent,
 				   struct sock *child)
 {
+	spin_lock(&queue->rskq_lock);
 	req->sk = child;
 	sk_acceptq_added(parent);
 
@@ -229,68 +202,48 @@
 
 	queue->rskq_accept_tail = req;
 	req->dl_next = NULL;
+	spin_unlock(&queue->rskq_lock);
 }
 
-static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
+						      struct sock *parent)
 {
-	struct request_sock *req = queue->rskq_accept_head;
+	struct request_sock *req;
 
-	WARN_ON(req == NULL);
-
-	queue->rskq_accept_head = req->dl_next;
-	if (queue->rskq_accept_head == NULL)
-		queue->rskq_accept_tail = NULL;
-
+	spin_lock_bh(&queue->rskq_lock);
+	req = queue->rskq_accept_head;
+	if (req) {
+		sk_acceptq_removed(parent);
+		queue->rskq_accept_head = req->dl_next;
+		if (queue->rskq_accept_head == NULL)
+			queue->rskq_accept_tail = NULL;
+	}
+	spin_unlock_bh(&queue->rskq_lock);
 	return req;
 }
 
 static inline void reqsk_queue_removed(struct request_sock_queue *queue,
 				       const struct request_sock *req)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-
 	if (req->num_timeout == 0)
-		atomic_inc(&lopt->young_dec);
-	atomic_inc(&lopt->qlen_dec);
+		atomic_dec(&queue->young);
+	atomic_dec(&queue->qlen);
 }
 
 static inline void reqsk_queue_added(struct request_sock_queue *queue)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-
-	lopt->young_inc++;
-	lopt->qlen_inc++;
-}
-
-static inline int listen_sock_qlen(const struct listen_sock *lopt)
-{
-	return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
-}
-
-static inline int listen_sock_young(const struct listen_sock *lopt)
-{
-	return lopt->young_inc - atomic_read(&lopt->young_dec);
+	atomic_inc(&queue->young);
+	atomic_inc(&queue->qlen);
 }
 
 static inline int reqsk_queue_len(const struct request_sock_queue *queue)
 {
-	const struct listen_sock *lopt = queue->listen_opt;
-
-	return lopt ? listen_sock_qlen(lopt) : 0;
+	return atomic_read(&queue->qlen);
 }
 
 static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
 {
-	return listen_sock_young(queue->listen_opt);
+	return atomic_read(&queue->young);
 }
 
-static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
-{
-	return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log;
-}
-
-void reqsk_queue_hash_req(struct request_sock_queue *queue,
-			  u32 hash, struct request_sock *req,
-			  unsigned long timeout);
-
 #endif /* _REQUEST_SOCK_H */
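
A minimal sketch, not part of this patch, of how a listener-side caller such
as tcp_conn_request() is expected to use the new reqsk_alloc() signature,
skipping the listener refcount when it answers with a syncookie:

	struct request_sock *req;

	/* want_cookie is true when the SYN backlog overflowed and the
	 * stack falls back to syncookies instead of queueing the request.
	 */
	req = reqsk_alloc(&tcp_request_sock_ops, sk, !want_cookie);
	if (!req)
		goto drop;
	/* With attach_listener == false, req->rsk_listener stays NULL and
	 * no reference on the listening socket is pinned while the cookie
	 * round-trips.
	 */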
diff --git a/include/net/route.h b/include/net/route.h
index 10a7d21..ee81307 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -28,6 +28,8 @@
 #include <net/inetpeer.h>
 #include <net/flow.h>
 #include <net/inet_sock.h>
+#include <net/ip_fib.h>
+#include <net/l3mdev.h>
 #include <linux/in_route.h>
 #include <linux/rtnetlink.h>
 #include <linux/rcupdate.h>
@@ -112,9 +114,17 @@
 int ip_rt_init(void);
 void rt_cache_flush(struct net *net);
 void rt_flush_dev(struct net_device *dev);
-struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
+struct rtable *__ip_route_output_key_hash(struct net *, struct flowi4 *flp,
+					  int mp_hash);
+
+static inline struct rtable *__ip_route_output_key(struct net *net,
+						   struct flowi4 *flp)
+{
+	return __ip_route_output_key_hash(net, flp, -1);
+}
+
 struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
-				    struct sock *sk);
+				    const struct sock *sk);
 struct dst_entry *ipv4_blackhole_route(struct net *net,
 				       struct dst_entry *dst_orig);
 
@@ -256,9 +266,6 @@
 	if (inet_sk(sk)->transparent)
 		flow_flags |= FLOWI_FLAG_ANYSRC;
 
-	if (netif_index_is_vrf(sock_net(sk), oif))
-		flow_flags |= FLOWI_FLAG_VRFSRC;
-
 	flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
 			   protocol, flow_flags, dst, src, dport, sport);
 }
@@ -275,6 +282,10 @@
 	ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
 			      sport, dport, sk);
 
+	if (!src && oif) {
+		l3mdev_get_saddr(net, oif, fl4);
+		src = fl4->saddr;
+	}
 	if (!dst || !src) {
 		rt = __ip_route_output_key(net, fl4);
 		if (IS_ERR(rt))
diff --git a/include/net/sock.h b/include/net/sock.h
index 94dff7f..64a7545 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -150,6 +150,10 @@
  *	@skc_node: main hash linkage for various protocol lookup tables
  *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
  *	@skc_tx_queue_mapping: tx queue number for this connection
+ *	@skc_flags: place holder for sk_flags
+ *		%SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
+ *		%SO_OOBINLINE settings, %SO_TIMESTAMPING settings
+ *	@skc_incoming_cpu: record/match cpu processing incoming packets
  *	@skc_refcnt: reference count
  *
  *	This is the minimal network layer representation of sockets, the header
@@ -200,6 +204,16 @@
 
 	atomic64_t		skc_cookie;
 
+	/* following fields are padding to force
+	 * offset(struct sock, sk_refcnt) == 128 on 64bit arches
+	 * assuming IPV6 is enabled. We use this padding differently
+	 * for different kind of 'sockets'
+	 */
+	union {
+		unsigned long	skc_flags;
+		struct sock	*skc_listener; /* request_sock */
+		struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
+	};
 	/*
 	 * fields between dontcopy_begin/dontcopy_end
 	 * are not copied in sock_copy()
@@ -212,9 +226,20 @@
 		struct hlist_nulls_node skc_nulls_node;
 	};
 	int			skc_tx_queue_mapping;
+	union {
+		int		skc_incoming_cpu;
+		u32		skc_rcv_wnd;
+		u32		skc_tw_rcv_nxt; /* struct tcp_timewait_sock  */
+	};
+
 	atomic_t		skc_refcnt;
 	/* private: */
 	int                     skc_dontcopy_end[0];
+	union {
+		u32		skc_rxhash;
+		u32		skc_window_clamp;
+		u32		skc_tw_snd_nxt; /* struct tcp_timewait_sock */
+	};
 	/* public: */
 };
 
@@ -243,8 +268,6 @@
   *	@sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
   *	@sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
   *	@sk_sndbuf: size of send buffer in bytes
-  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
-  *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
   *	@sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
   *	@sk_no_check_rx: allow zero checksum in RX packets
   *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
@@ -273,8 +296,6 @@
   *	@sk_rcvlowat: %SO_RCVLOWAT setting
   *	@sk_rcvtimeo: %SO_RCVTIMEO setting
   *	@sk_sndtimeo: %SO_SNDTIMEO setting
-  *	@sk_rxhash: flow hash received from netif layer
-  *	@sk_incoming_cpu: record cpu processing incoming packets
   *	@sk_txhash: computed flow hash for use on transmit
   *	@sk_filter: socket filtering instructions
   *	@sk_timer: sock cleanup timer
@@ -331,6 +352,9 @@
 #define sk_v6_daddr		__sk_common.skc_v6_daddr
 #define sk_v6_rcv_saddr	__sk_common.skc_v6_rcv_saddr
 #define sk_cookie		__sk_common.skc_cookie
+#define sk_incoming_cpu		__sk_common.skc_incoming_cpu
+#define sk_flags		__sk_common.skc_flags
+#define sk_rxhash		__sk_common.skc_rxhash
 
 	socket_lock_t		sk_lock;
 	struct sk_buff_head	sk_receive_queue;
@@ -350,14 +374,6 @@
 	} sk_backlog;
 #define sk_rmem_alloc sk_backlog.rmem_alloc
 	int			sk_forward_alloc;
-#ifdef CONFIG_RPS
-	__u32			sk_rxhash;
-#endif
-	u16			sk_incoming_cpu;
-	/* 16bit hole
-	 * Warned : sk_incoming_cpu can be set from softirq,
-	 * Do not use this hole without fully understanding possible issues.
-	 */
 
 	__u32			sk_txhash;
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -373,7 +389,6 @@
 #ifdef CONFIG_XFRM
 	struct xfrm_policy	*sk_policy[2];
 #endif
-	unsigned long 		sk_flags;
 	struct dst_entry	*sk_rx_dst;
 	struct dst_entry __rcu	*sk_dst_cache;
 	spinlock_t		sk_dst_lock;
@@ -759,7 +774,7 @@
 
 #endif
 
-static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask)
+static inline gfp_t sk_gfp_atomic(const struct sock *sk, gfp_t gfp_mask)
 {
 	return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC);
 }
@@ -1514,6 +1529,13 @@
 void sock_kzfree_s(struct sock *sk, void *mem, int size);
 void sk_send_sigurg(struct sock *sk);
 
+struct sockcm_cookie {
+	u32 mark;
+};
+
+int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
+		   struct sockcm_cookie *sockc);
+
 /*
  * Functions to fill in entries in struct proto_ops when a protocol
  * does not implement a particular function.
@@ -2201,6 +2223,14 @@
 	return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
 }
 
+/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV
+ * SYNACK messages can be attached to either ones (depending on SYNCOOKIE)
+ */
+static inline bool sk_listener(const struct sock *sk)
+{
+	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
+}
+
 void sock_enable_timestamp(struct sock *sk, int flag);
 int sock_get_timestamp(struct sock *, struct timeval __user *);
 int sock_get_timestampns(struct sock *, struct timespec __user *);
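
A short sketch, assuming a generic sendmsg() path, of how the new
sockcm_cookie/sock_cmsg_send() pair is meant to be consumed; msg and the
later skb-building step are placeholder context, not from this patch:

	struct sockcm_cookie sockc = { .mark = sk->sk_mark };
	int err;

	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (err)
			return err;
	}
	/* sockc.mark now holds either sk->sk_mark or a per-call SO_MARK
	 * passed as a control message, and is used when the skb is built.
	 */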
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 319baab..1ce7083 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -1,6 +1,6 @@
 /*
  * include/net/switchdev.h - Switch device API
- * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
  * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -13,70 +13,108 @@
 
 #include <linux/netdevice.h>
 #include <linux/notifier.h>
+#include <linux/list.h>
 
 #define SWITCHDEV_F_NO_RECURSE		BIT(0)
+#define SWITCHDEV_F_SKIP_EOPNOTSUPP	BIT(1)
 
-enum switchdev_trans {
-	SWITCHDEV_TRANS_NONE,
-	SWITCHDEV_TRANS_PREPARE,
-	SWITCHDEV_TRANS_ABORT,
-	SWITCHDEV_TRANS_COMMIT,
+struct switchdev_trans_item {
+	struct list_head list;
+	void *data;
+	void (*destructor)(const void *data);
 };
 
+struct switchdev_trans {
+	struct list_head item_list;
+	bool ph_prepare;
+};
+
+static inline bool switchdev_trans_ph_prepare(struct switchdev_trans *trans)
+{
+	return trans && trans->ph_prepare;
+}
+
+static inline bool switchdev_trans_ph_commit(struct switchdev_trans *trans)
+{
+	return trans && !trans->ph_prepare;
+}
+
 enum switchdev_attr_id {
-	SWITCHDEV_ATTR_UNDEFINED,
-	SWITCHDEV_ATTR_PORT_PARENT_ID,
-	SWITCHDEV_ATTR_PORT_STP_STATE,
-	SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+	SWITCHDEV_ATTR_ID_UNDEFINED,
+	SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
+	SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+	SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
+	SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
 };
 
 struct switchdev_attr {
 	enum switchdev_attr_id id;
-	enum switchdev_trans trans;
 	u32 flags;
 	union {
 		struct netdev_phys_item_id ppid;	/* PORT_PARENT_ID */
 		u8 stp_state;				/* PORT_STP_STATE */
 		unsigned long brport_flags;		/* PORT_BRIDGE_FLAGS */
+		u32 ageing_time;			/* BRIDGE_AGEING_TIME */
 	} u;
 };
 
 struct fib_info;
 
 enum switchdev_obj_id {
-	SWITCHDEV_OBJ_UNDEFINED,
-	SWITCHDEV_OBJ_PORT_VLAN,
-	SWITCHDEV_OBJ_IPV4_FIB,
-	SWITCHDEV_OBJ_PORT_FDB,
+	SWITCHDEV_OBJ_ID_UNDEFINED,
+	SWITCHDEV_OBJ_ID_PORT_VLAN,
+	SWITCHDEV_OBJ_ID_IPV4_FIB,
+	SWITCHDEV_OBJ_ID_PORT_FDB,
 };
 
 struct switchdev_obj {
 	enum switchdev_obj_id id;
-	enum switchdev_trans trans;
-	int (*cb)(struct net_device *dev, struct switchdev_obj *obj);
-	union {
-		struct switchdev_obj_vlan {		/* PORT_VLAN */
-			u16 flags;
-			u16 vid_begin;
-			u16 vid_end;
-		} vlan;
-		struct switchdev_obj_ipv4_fib {		/* IPV4_FIB */
-			u32 dst;
-			int dst_len;
-			struct fib_info *fi;
-			u8 tos;
-			u8 type;
-			u32 nlflags;
-			u32 tb_id;
-		} ipv4_fib;
-		struct switchdev_obj_fdb {		/* PORT_FDB */
-			const unsigned char *addr;
-			u16 vid;
-			u16 ndm_state;
-		} fdb;
-	} u;
 };
 
+/* SWITCHDEV_OBJ_ID_PORT_VLAN */
+struct switchdev_obj_port_vlan {
+	struct switchdev_obj obj;
+	u16 flags;
+	u16 vid_begin;
+	u16 vid_end;
+};
+
+#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
+	container_of(obj, struct switchdev_obj_port_vlan, obj)
+
+/* SWITCHDEV_OBJ_ID_IPV4_FIB */
+struct switchdev_obj_ipv4_fib {
+	struct switchdev_obj obj;
+	u32 dst;
+	int dst_len;
+	struct fib_info *fi;
+	u8 tos;
+	u8 type;
+	u32 nlflags;
+	u32 tb_id;
+};
+
+#define SWITCHDEV_OBJ_IPV4_FIB(obj) \
+	container_of(obj, struct switchdev_obj_ipv4_fib, obj)
+
+/* SWITCHDEV_OBJ_ID_PORT_FDB */
+struct switchdev_obj_port_fdb {
+	struct switchdev_obj obj;
+	const unsigned char *addr;
+	u16 vid;
+	u16 ndm_state;
+};
+
+#define SWITCHDEV_OBJ_PORT_FDB(obj) \
+	container_of(obj, struct switchdev_obj_port_fdb, obj)
+
+void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
+				  void *data, void (*destructor)(void const *),
+				  struct switchdev_trans_item *tritem);
+void *switchdev_trans_item_dequeue(struct switchdev_trans *trans);
+
+typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
+
 /**
  * struct switchdev_ops - switchdev operations
  *
@@ -84,23 +122,26 @@
  *
  * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
  *
- * @switchdev_port_obj_add: Add an object to port (see switchdev_obj).
+ * @switchdev_port_obj_add: Add an object to port (see switchdev_obj_*).
  *
- * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj).
+ * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj_*).
  *
- * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj).
+ * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj_*).
  */
 struct switchdev_ops {
 	int	(*switchdev_port_attr_get)(struct net_device *dev,
 					   struct switchdev_attr *attr);
 	int	(*switchdev_port_attr_set)(struct net_device *dev,
-					   struct switchdev_attr *attr);
+					   struct switchdev_attr *attr,
+					   struct switchdev_trans *trans);
 	int	(*switchdev_port_obj_add)(struct net_device *dev,
-					  struct switchdev_obj *obj);
+					  const struct switchdev_obj *obj,
+					  struct switchdev_trans *trans);
 	int	(*switchdev_port_obj_del)(struct net_device *dev,
-					  struct switchdev_obj *obj);
+					  const struct switchdev_obj *obj);
 	int	(*switchdev_port_obj_dump)(struct net_device *dev,
-					  struct switchdev_obj *obj);
+					   struct switchdev_obj *obj,
+					   switchdev_obj_dump_cb_t *cb);
 };
 
 enum switchdev_notifier_type {
@@ -130,9 +171,12 @@
 			    struct switchdev_attr *attr);
 int switchdev_port_attr_set(struct net_device *dev,
 			    struct switchdev_attr *attr);
-int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj);
-int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj);
-int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj);
+int switchdev_port_obj_add(struct net_device *dev,
+			   const struct switchdev_obj *obj);
+int switchdev_port_obj_del(struct net_device *dev,
+			   const struct switchdev_obj *obj);
+int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
+			    switchdev_obj_dump_cb_t *cb);
 int register_switchdev_notifier(struct notifier_block *nb);
 int unregister_switchdev_notifier(struct notifier_block *nb);
 int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
@@ -177,19 +221,20 @@
 }
 
 static inline int switchdev_port_obj_add(struct net_device *dev,
-					 struct switchdev_obj *obj)
+					 const struct switchdev_obj *obj)
 {
 	return -EOPNOTSUPP;
 }
 
 static inline int switchdev_port_obj_del(struct net_device *dev,
-					 struct switchdev_obj *obj)
+					 const struct switchdev_obj *obj)
 {
 	return -EOPNOTSUPP;
 }
 
 static inline int switchdev_port_obj_dump(struct net_device *dev,
-					  struct switchdev_obj *obj)
+					  const struct switchdev_obj *obj,
+					  switchdev_obj_dump_cb_t *cb)
 {
 	return -EOPNOTSUPP;
 }
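
A hedged sketch of a driver obj_add callback using the flattened switchdev
object layout and the new container_of-style accessors; foo_vlan_prepare()
and foo_vlan_apply() are hypothetical driver helpers:

	static int foo_port_obj_add(struct net_device *dev,
				    const struct switchdev_obj *obj,
				    struct switchdev_trans *trans)
	{
		const struct switchdev_obj_port_vlan *vlan;

		if (obj->id != SWITCHDEV_OBJ_ID_PORT_VLAN)
			return -EOPNOTSUPP;

		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		if (switchdev_trans_ph_prepare(trans))
			/* prepare phase: only validate and reserve resources */
			return foo_vlan_prepare(dev, vlan->vid_begin,
						vlan->vid_end);
		/* commit phase: apply, must not fail */
		return foo_vlan_apply(dev, vlan->vid_begin, vlan->vid_end,
				      vlan->flags);
	}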
diff --git a/include/net/tc_act/tc_connmark.h b/include/net/tc_act/tc_connmark.h
index 5c1104c..02caa40 100644
--- a/include/net/tc_act/tc_connmark.h
+++ b/include/net/tc_act/tc_connmark.h
@@ -5,6 +5,7 @@
 
 struct tcf_connmark_info {
 	struct tcf_common common;
+	struct net *net;
 	u16 zone;
 };
 
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0cab28c..a6be56d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -365,8 +365,7 @@
 void tcp_write_timer_handler(struct sock *sk);
 void tcp_delack_timer_handler(struct sock *sk);
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-			  const struct tcphdr *th, unsigned int len);
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			 const struct tcphdr *th, unsigned int len);
 void tcp_rcv_space_adjust(struct sock *sk);
@@ -451,19 +450,20 @@
 void tcp_v4_mtu_reduced(struct sock *sk);
 void tcp_req_err(struct sock *sk, u32 seq);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
-struct sock *tcp_create_openreq_child(struct sock *sk,
+struct sock *tcp_create_openreq_child(const struct sock *sk,
 				      struct request_sock *req,
 				      struct sk_buff *skb);
 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
-struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req,
 				  struct dst_entry *dst);
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int tcp_connect(struct sock *sk);
-struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 				struct request_sock *req,
-				struct tcp_fastopen_cookie *foc);
+				struct tcp_fastopen_cookie *foc,
+				bool attach_req);
 int tcp_disconnect(struct sock *sk, int flags);
 
 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -492,8 +492,9 @@
 
 /* syncookies: remember time of last synqueue overflow
  * But do not dirty this field too often (once per second is enough)
+ * It is racy as we do not hold a lock, but race is very minor.
  */
-static inline void tcp_synq_overflow(struct sock *sk)
+static inline void tcp_synq_overflow(const struct sock *sk)
 {
 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 	unsigned long now = jiffies;
@@ -520,8 +521,7 @@
 
 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 			      u16 *mssp);
-__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
-			      __u16 *mss);
+__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
 __u32 cookie_init_timestamp(struct request_sock *req);
 bool cookie_timestamp_decode(struct tcp_options_received *opt);
 bool cookie_ecn_ok(const struct tcp_options_received *opt,
@@ -534,8 +534,7 @@
 
 u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 			      const struct tcphdr *th, u16 *mssp);
-__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
-			      __u16 *mss);
+__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
 #endif
 /* tcp_output.c */
 
@@ -565,6 +564,7 @@
 /* tcp_input.c */
 void tcp_resume_early_retransmit(struct sock *sk);
 void tcp_rearm_rto(struct sock *sk);
+void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 void tcp_reset(struct sock *sk);
 
 /* tcp_timer.c */
@@ -1206,7 +1206,8 @@
 }
 
 extern void tcp_openreq_init_rwin(struct request_sock *req,
-				  struct sock *sk, struct dst_entry *dst);
+				  const struct sock *sk_listener,
+				  const struct dst_entry *dst);
 
 void tcp_enter_memory_pressure(struct sock *sk);
 
@@ -1370,16 +1371,16 @@
 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
 		   int family);
-struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
 					 const struct sock *addr_sk);
 
 #ifdef CONFIG_TCP_MD5SIG
-struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
+struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
 					 const union tcp_md5_addr *addr,
 					 int family);
 #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
 #else
-static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
+static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
 					 const union tcp_md5_addr *addr,
 					 int family)
 {
@@ -1420,10 +1421,10 @@
 
 extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
 int tcp_fastopen_reset_cipher(void *key, unsigned int len);
-bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
-		      struct request_sock *req,
-		      struct tcp_fastopen_cookie *foc,
-		      struct dst_entry *dst);
+struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+			      struct request_sock *req,
+			      struct tcp_fastopen_cookie *foc,
+			      struct dst_entry *dst);
 void tcp_fastopen_init_key_once(bool publish);
 #define TCP_FASTOPEN_KEY_LENGTH 16
 
@@ -1618,7 +1619,6 @@
 /* /proc */
 enum tcp_seq_states {
 	TCP_SEQ_STATE_LISTENING,
-	TCP_SEQ_STATE_OPENREQ,
 	TCP_SEQ_STATE_ESTABLISHED,
 };
 
@@ -1637,7 +1637,6 @@
 	enum tcp_seq_states	state;
 	struct sock		*syn_wait_sk;
 	int			bucket, offset, sbucket, num;
-	kuid_t			uid;
 	loff_t			last_pos;
 };
 
@@ -1674,7 +1673,7 @@
 void tcp4_proc_exit(void);
 #endif
 
-int tcp_rtx_synack(struct sock *sk, struct request_sock *req);
+int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
 int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		     const struct tcp_request_sock_ops *af_ops,
 		     struct sock *sk, struct sk_buff *skb);
@@ -1682,7 +1681,7 @@
 /* TCP af-specific functions */
 struct tcp_sock_af_ops {
 #ifdef CONFIG_TCP_MD5SIG
-	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
+	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
 						const struct sock *addr_sk);
 	int		(*calc_md5_hash)(char *location,
 					 const struct tcp_md5sig_key *md5,
@@ -1697,40 +1696,42 @@
 struct tcp_request_sock_ops {
 	u16 mss_clamp;
 #ifdef CONFIG_TCP_MD5SIG
-	struct tcp_md5sig_key *(*req_md5_lookup)(struct sock *sk,
+	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
 						 const struct sock *addr_sk);
 	int		(*calc_md5_hash) (char *location,
 					  const struct tcp_md5sig_key *md5,
 					  const struct sock *sk,
 					  const struct sk_buff *skb);
 #endif
-	void (*init_req)(struct request_sock *req, struct sock *sk,
+	void (*init_req)(struct request_sock *req,
+			 const struct sock *sk_listener,
 			 struct sk_buff *skb);
 #ifdef CONFIG_SYN_COOKIES
-	__u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
+	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
 				 __u16 *mss);
 #endif
-	struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
+	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
 				       const struct request_sock *req,
 				       bool *strict);
 	__u32 (*init_seq)(const struct sk_buff *skb);
-	int (*send_synack)(struct sock *sk, struct dst_entry *dst,
+	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
 			   struct flowi *fl, struct request_sock *req,
-			   u16 queue_mapping, struct tcp_fastopen_cookie *foc);
-	void (*queue_hash_add)(struct sock *sk, struct request_sock *req,
-			       const unsigned long timeout);
+			   u16 queue_mapping, struct tcp_fastopen_cookie *foc,
+			   bool attach_req);
 };
 
 #ifdef CONFIG_SYN_COOKIES
 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
-					 struct sock *sk, struct sk_buff *skb,
+					 const struct sock *sk, struct sk_buff *skb,
 					 __u16 *mss)
 {
-	return ops->cookie_init_seq(sk, skb, mss);
+	tcp_synq_overflow(sk);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+	return ops->cookie_init_seq(skb, mss);
 }
 #else
 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
-					 struct sock *sk, struct sk_buff *skb,
+					 const struct sock *sk, struct sk_buff *skb,
 					 __u16 *mss)
 {
 	return 0;
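
A rough sketch, not taken from this patch, of how tcp_conn_request() is
expected to dispatch SYNACKs now that tcp_try_fastopen() returns the child
socket directly and send_synack() takes an attach_req flag:

	struct sock *fastopen_sk;

	fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
	if (fastopen_sk) {
		/* SYN data accepted: the child already exists, answer from it */
		af_ops->send_synack(fastopen_sk, dst, &fl, req,
				    skb_get_queue_mapping(skb), &foc, false);
	} else {
		/* normal or syncookie path: answer from the listener */
		af_ops->send_synack(sk, dst, &fl, req,
				    skb_get_queue_mapping(skb), &foc,
				    !want_cookie);
	}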
diff --git a/include/net/vrf.h b/include/net/vrf.h
deleted file mode 100644
index 593e609..0000000
--- a/include/net/vrf.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * include/net/net_vrf.h - adds vrf dev structure definitions
- * Copyright (c) 2015 Cumulus Networks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __LINUX_NET_VRF_H
-#define __LINUX_NET_VRF_H
-
-struct net_vrf_dev {
-	struct rcu_head		rcu;
-	int                     ifindex; /* ifindex of master dev */
-	u32                     tb_id;   /* table id for VRF */
-};
-
-struct slave {
-	struct list_head	list;
-	struct net_device	*dev;
-};
-
-struct slave_queue {
-	struct list_head	all_slaves;
-};
-
-struct net_vrf {
-	struct slave_queue	queue;
-	struct rtable           *rth;
-	u32			tb_id;
-};
-
-
-#if IS_ENABLED(CONFIG_NET_VRF)
-/* called with rcu_read_lock() */
-static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
-{
-	struct net_vrf_dev *vrf_ptr;
-	int ifindex = 0;
-
-	if (!dev)
-		return 0;
-
-	if (netif_is_vrf(dev)) {
-		ifindex = dev->ifindex;
-	} else {
-		vrf_ptr = rcu_dereference(dev->vrf_ptr);
-		if (vrf_ptr)
-			ifindex = vrf_ptr->ifindex;
-	}
-
-	return ifindex;
-}
-
-static inline int vrf_master_ifindex(const struct net_device *dev)
-{
-	int ifindex;
-
-	rcu_read_lock();
-	ifindex = vrf_master_ifindex_rcu(dev);
-	rcu_read_unlock();
-
-	return ifindex;
-}
-
-/* called with rcu_read_lock */
-static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
-{
-	u32 tb_id = 0;
-
-	if (dev) {
-		struct net_vrf_dev *vrf_ptr;
-
-		vrf_ptr = rcu_dereference(dev->vrf_ptr);
-		if (vrf_ptr)
-			tb_id = vrf_ptr->tb_id;
-	}
-	return tb_id;
-}
-
-static inline u32 vrf_dev_table(const struct net_device *dev)
-{
-	u32 tb_id;
-
-	rcu_read_lock();
-	tb_id = vrf_dev_table_rcu(dev);
-	rcu_read_unlock();
-
-	return tb_id;
-}
-
-static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
-{
-	struct net_device *dev;
-	u32 tb_id = 0;
-
-	if (!ifindex)
-		return 0;
-
-	rcu_read_lock();
-
-	dev = dev_get_by_index_rcu(net, ifindex);
-	if (dev)
-		tb_id = vrf_dev_table_rcu(dev);
-
-	rcu_read_unlock();
-
-	return tb_id;
-}
-
-/* called with rtnl */
-static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
-{
-	u32 tb_id = 0;
-
-	if (dev) {
-		struct net_vrf_dev *vrf_ptr;
-
-		vrf_ptr = rtnl_dereference(dev->vrf_ptr);
-		if (vrf_ptr)
-			tb_id = vrf_ptr->tb_id;
-	}
-	return tb_id;
-}
-
-/* caller has already checked netif_is_vrf(dev) */
-static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
-{
-	struct rtable *rth = ERR_PTR(-ENETUNREACH);
-	struct net_vrf *vrf = netdev_priv(dev);
-
-	if (vrf) {
-		rth = vrf->rth;
-		atomic_inc(&rth->dst.__refcnt);
-	}
-	return rth;
-}
-
-#else
-static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
-{
-	return 0;
-}
-
-static inline int vrf_master_ifindex(const struct net_device *dev)
-{
-	return 0;
-}
-
-static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
-{
-	return 0;
-}
-
-static inline u32 vrf_dev_table(const struct net_device *dev)
-{
-	return 0;
-}
-
-static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
-{
-	return 0;
-}
-
-static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
-{
-	return 0;
-}
-
-static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
-{
-	return ERR_PTR(-ENETUNREACH);
-}
-#endif
-
-#endif /* __LINUX_NET_VRF_H */
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 480a319..c1c899c 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -152,7 +152,10 @@
 struct vxlan_dev {
 	struct hlist_node hlist;	/* vni hash table */
 	struct list_head  next;		/* vxlan's per namespace list */
-	struct vxlan_sock *vn_sock;	/* listening socket */
+	struct vxlan_sock *vn4_sock;	/* listening socket for IPv4 */
+#if IS_ENABLED(CONFIG_IPV6)
+	struct vxlan_sock *vn6_sock;	/* listening socket for IPv6 */
+#endif
 	struct net_device *dev;
 	struct net	  *net;		/* netns for packet i/o */
 	struct vxlan_rdst default_dst;	/* default destination */
@@ -195,9 +198,14 @@
 struct net_device *vxlan_dev_create(struct net *net, const char *name,
 				    u8 name_assign_type, struct vxlan_config *conf);
 
-static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan)
+static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan,
+					unsigned short family)
 {
-	return inet_sk(vxlan->vn_sock->sock->sk)->inet_sport;
+#if IS_ENABLED(CONFIG_IPV6)
+	if (family == AF_INET6)
+		return inet_sk(vxlan->vn6_sock->sock->sk)->inet_sport;
+#endif
+	return inet_sk(vxlan->vn4_sock->sock->sk)->inet_sport;
 }
 
 static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
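
A two-line sketch of the helper's intent after the split into per-family
sockets; rdst stands for the vxlan_rdst currently being handled and is
assumed context, not part of this patch:

	/* Pick the UDP port of the listening socket that matches the
	 * remote's address family. */
	__be16 port = vxlan_dev_dst_port(vxlan, rdst->remote_ip.sa.sa_family);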
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index fd17610..4a9c21f 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -333,7 +333,7 @@
 						const xfrm_address_t *saddr);
 	int			(*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
 	int			(*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
-	int			(*output)(struct sock *sk, struct sk_buff *skb);
+	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
 	int			(*output_finish)(struct sock *sk, struct sk_buff *skb);
 	int			(*extract_input)(struct xfrm_state *x,
 						 struct sk_buff *skb);
@@ -1527,7 +1527,7 @@
 
 int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-int xfrm4_output(struct sock *sk, struct sk_buff *skb);
+int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
 int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
 int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
@@ -1552,7 +1552,7 @@
 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
 int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-int xfrm6_output(struct sock *sk, struct sk_buff *skb);
+int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
 int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
 			  u8 **prevhdr);
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index 391dae1..a0fa975 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -294,8 +294,8 @@
 
 struct opa_port_state_info {
 	struct opa_port_states port_states;
-	u16 link_width_downgrade_tx_active;
-	u16 link_width_downgrade_rx_active;
+	__be16 link_width_downgrade_tx_active;
+	__be16 link_width_downgrade_rx_active;
 };
 
 struct opa_port_info {
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
index 676b03b..11571b2 100644
--- a/include/scsi/scsi_common.h
+++ b/include/scsi/scsi_common.h
@@ -61,4 +61,9 @@
 extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
 				 struct scsi_sense_hdr *sshdr);
 
+extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
+int scsi_set_sense_information(u8 *buf, int buf_len, u64 info);
+extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
+				       int desc_type);
+
 #endif /* _SCSI_COMMON_H_ */
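
A brief sketch of the helpers now exported from scsi_common; the sense codes
and the bad_lba value are illustrative assumptions:

	u8 sense[SCSI_SENSE_BUFFERSIZE];

	/* Fixed format (desc == 0): ILLEGAL REQUEST, INVALID FIELD IN CDB */
	scsi_build_sense_buffer(0, sense, ILLEGAL_REQUEST, 0x24, 0x00);

	/* Descriptor format (desc == 1) is needed to carry the 64-bit
	 * INFORMATION field, e.g. the failing LBA on a protection error. */
	scsi_build_sense_buffer(1, sense, ILLEGAL_REQUEST, 0x10, 0x01);
	scsi_set_sense_information(sense, sizeof(sense), bad_lba);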
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 50c2a36..fe89d7c 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -196,34 +196,13 @@
 	struct execute_work	ew; /* used to get process context on put */
 	struct work_struct	requeue_work;
 
-	struct scsi_dh_data	*scsi_dh_data;
+	struct scsi_device_handler *handler;
+	void			*handler_data;
+
 	enum scsi_device_state sdev_state;
 	unsigned long		sdev_data[0];
 } __attribute__((aligned(sizeof(unsigned long))));
 
-typedef void (*activate_complete)(void *, int);
-struct scsi_device_handler {
-	/* Used by the infrastructure */
-	struct list_head list; /* list of scsi_device_handlers */
-
-	/* Filled by the hardware handler */
-	struct module *module;
-	const char *name;
-	int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
-	struct scsi_dh_data *(*attach)(struct scsi_device *);
-	void (*detach)(struct scsi_device *);
-	int (*activate)(struct scsi_device *, activate_complete, void *);
-	int (*prep_fn)(struct scsi_device *, struct request *);
-	int (*set_params)(struct scsi_device *, const char *);
-	bool (*match)(struct scsi_device *);
-};
-
-struct scsi_dh_data {
-	struct scsi_device_handler *scsi_dh;
-	struct scsi_device *sdev;
-	struct kref kref;
-};
-
 #define	to_scsi_device(d)	\
 	container_of(d, struct scsi_device, sdev_gendev)
 #define	class_to_sdev(d)	\
diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h
index 620c723..85d7317 100644
--- a/include/scsi/scsi_dh.h
+++ b/include/scsi/scsi_dh.h
@@ -55,11 +55,26 @@
 	SCSI_DH_NOSYS,
 	SCSI_DH_DRIVER_MAX,
 };
-#if defined(CONFIG_SCSI_DH) || defined(CONFIG_SCSI_DH_MODULE)
+
+typedef void (*activate_complete)(void *, int);
+struct scsi_device_handler {
+	/* Used by the infrastructure */
+	struct list_head list; /* list of scsi_device_handlers */
+
+	/* Filled by the hardware handler */
+	struct module *module;
+	const char *name;
+	int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
+	int (*attach)(struct scsi_device *);
+	void (*detach)(struct scsi_device *);
+	int (*activate)(struct scsi_device *, activate_complete, void *);
+	int (*prep_fn)(struct scsi_device *, struct request *);
+	int (*set_params)(struct scsi_device *, const char *);
+};
+
+#ifdef CONFIG_SCSI_DH
 extern int scsi_dh_activate(struct request_queue *, activate_complete, void *);
-extern int scsi_dh_handler_exist(const char *);
 extern int scsi_dh_attach(struct request_queue *, const char *);
-extern void scsi_dh_detach(struct request_queue *);
 extern const char *scsi_dh_attached_handler_name(struct request_queue *, gfp_t);
 extern int scsi_dh_set_params(struct request_queue *, const char *);
 #else
@@ -69,18 +84,10 @@
 	fn(data, 0);
 	return 0;
 }
-static inline int scsi_dh_handler_exist(const char *name)
-{
-	return 0;
-}
 static inline int scsi_dh_attach(struct request_queue *req, const char *name)
 {
 	return SCSI_DH_NOSYS;
 }
-static inline void scsi_dh_detach(struct request_queue *q)
-{
-	return;
-}
 static inline const char *scsi_dh_attached_handler_name(struct request_queue *q,
 							gfp_t gfp)
 {
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 8d1d7fa..dbb8c64 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -4,6 +4,7 @@
 #include <linux/scatterlist.h>
 
 #include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_common.h>
 struct scsi_device;
 struct Scsi_Host;
 
@@ -21,14 +22,9 @@
 	return ((sshdr->response_code >= 0x70) && (sshdr->response_code & 1));
 }
 
-extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
-				       int desc_type);
-
 extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
 				   u64 * info_out);
 
-extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-
 extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
 
 struct scsi_eh_save {
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 0aedbb2..373d334 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -62,6 +62,8 @@
 /* T10 protection information disabled by default */
 #define TA_DEFAULT_T10_PI		0
 #define TA_DEFAULT_FABRIC_PROT_TYPE	0
+/* TPG status needs to be enabled to return sendtargets discovery endpoint info */
+#define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1
 
 #define ISCSI_IOV_DATA_BUFFER		5
 
@@ -517,7 +519,6 @@
 	u16			cid;
 	/* Remote TCP Port */
 	u16			login_port;
-	u16			local_port;
 	int			net_size;
 	int			login_family;
 	u32			auth_id;
@@ -527,9 +528,8 @@
 	u32			exp_statsn;
 	/* Per connection status sequence number */
 	u32			stat_sn;
-#define IPV6_ADDRESS_SPACE				48
-	unsigned char		login_ip[IPV6_ADDRESS_SPACE];
-	unsigned char		local_ip[IPV6_ADDRESS_SPACE];
+	struct sockaddr_storage login_sockaddr;
+	struct sockaddr_storage local_sockaddr;
 	int			conn_usage_count;
 	int			conn_waiting_on_uc;
 	atomic_t		check_immediate_queue;
@@ -636,7 +636,7 @@
 	/* session wide counter: expected command sequence number */
 	u32			exp_cmd_sn;
 	/* session wide counter: maximum allowed command sequence number */
-	u32			max_cmd_sn;
+	atomic_t		max_cmd_sn;
 	struct list_head	sess_ooo_cmdsn_list;
 
 	/* LIO specific session ID */
@@ -764,6 +764,7 @@
 	u32			default_erl;
 	u8			t10_pi;
 	u32			fabric_prot_type;
+	u32			tpg_enabled_sendtargets;
 	struct iscsi_portal_group *tpg;
 };
 
@@ -776,12 +777,10 @@
 	enum iscsi_timer_flags_table np_login_timer_flags;
 	u32			np_exports;
 	enum np_flags_table	np_flags;
-	unsigned char		np_ip[IPV6_ADDRESS_SPACE];
-	u16			np_port;
 	spinlock_t		np_thread_lock;
 	struct completion	np_restart_comp;
 	struct socket		*np_socket;
-	struct __kernel_sockaddr_storage np_sockaddr;
+	struct sockaddr_storage np_sockaddr;
 	struct task_struct	*np_thread;
 	struct timer_list	np_login_timer;
 	void			*np_context;
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
index 3ff76b4..e615bb4 100644
--- a/include/target/iscsi/iscsi_target_stat.h
+++ b/include/target/iscsi/iscsi_target_stat.h
@@ -50,7 +50,7 @@
 	u64		last_fail_time;		/* time stamp (jiffies) */
 	u32		last_fail_type;
 	int		last_intr_fail_ip_family;
-	unsigned char	last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
+	struct sockaddr_storage last_intr_fail_sockaddr;
 	char		last_intr_fail_name[224];
 } ____cacheline_aligned;
 
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index e6bb166..90e37fa 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -9,7 +9,7 @@
 	int priv_size;
 	struct module *owner;
 	struct list_head t_node;
-	int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
+	int (*iscsit_setup_np)(struct iscsi_np *, struct sockaddr_storage *);
 	int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
 	void (*iscsit_free_np)(struct iscsi_np *);
 	void (*iscsit_wait_conn)(struct iscsi_conn *);
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 1e5c8f9..56cf8e4 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -93,4 +93,6 @@
 sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
 	sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
 
+bool target_sense_desc_format(struct se_device *dev);
+
 #endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 17ae2d6..5f48754 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -6,6 +6,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/blkdev.h>
 #include <linux/percpu_ida.h>
+#include <linux/t10-pi.h>
 #include <net/sock.h>
 #include <net/tcp.h>
 
@@ -426,12 +427,6 @@
 	TARGET_DIF_CHECK_REFTAG = 0x1 << 2,
 };
 
-struct se_dif_v1_tuple {
-	__be16			guard_tag;
-	__be16			app_tag;
-	__be32			ref_tag;
-};
-
 /* for sam_task_attr */
 #define TCM_SIMPLE_TAG	0x20
 #define TCM_HEAD_TAG	0x21
@@ -444,6 +439,9 @@
 	u8			scsi_asc;
 	u8			scsi_ascq;
 	u16			scsi_sense_length;
+	unsigned		cmd_wait_set:1;
+	unsigned		unknown_data_length:1;
+	bool			state_active:1;
 	u64			tag; /* SAM command identifier aka task tag */
 	/* Delay for ALUA Active/NonOptimized state access in milliseconds */
 	int			alua_nonop_delay;
@@ -455,11 +453,8 @@
 	unsigned int		map_tag;
 	/* Transport protocol dependent state, see transport_state_table */
 	enum transport_state_table t_state;
-	unsigned		cmd_wait_set:1;
-	unsigned		unknown_data_length:1;
 	/* See se_cmd_flags_table */
 	u32			se_cmd_flags;
-	u32			se_ordered_id;
 	/* Total size in bytes associated with command */
 	u32			data_length;
 	u32			residual_count;
@@ -477,7 +472,6 @@
 	struct se_tmr_req	*se_tmr_req;
 	struct list_head	se_cmd_list;
 	struct completion	cmd_wait_comp;
-	struct kref		cmd_kref;
 	const struct target_core_fabric_ops *se_tfo;
 	sense_reason_t		(*execute_cmd)(struct se_cmd *);
 	sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
@@ -497,6 +491,7 @@
 #define CMD_T_REQUEST_STOP	(1 << 8)
 #define CMD_T_BUSY		(1 << 9)
 	spinlock_t		t_state_lock;
+	struct kref		cmd_kref;
 	struct completion	t_transport_stop_comp;
 
 	struct work_struct	work;
@@ -509,8 +504,10 @@
 	struct scatterlist	*t_bidi_data_sg;
 	unsigned int		t_bidi_data_nents;
 
+	/* Used for lun->lun_ref counting */
+	int			lun_ref_active;
+
 	struct list_head	state_list;
-	bool			state_active;
 
 	/* old task stop completion, consider merging with some of the above */
 	struct completion	task_stop_comp;
@@ -518,20 +515,17 @@
 	/* backend private data */
 	void			*priv;
 
-	/* Used for lun->lun_ref counting */
-	int			lun_ref_active;
-
 	/* DIF related members */
 	enum target_prot_op	prot_op;
 	enum target_prot_type	prot_type;
 	u8			prot_checks;
+	bool			prot_pto;
 	u32			prot_length;
 	u32			reftag_seed;
 	struct scatterlist	*t_prot_sg;
 	unsigned int		t_prot_nents;
 	sense_reason_t		pi_err;
 	sector_t		bad_sector;
-	bool			prot_pto;
 };
 
 struct se_ua {
@@ -598,7 +592,6 @@
 };
 
 struct se_lun_acl {
-	char			initiatorname[TRANSPORT_IQN_LEN];
 	u64			mapped_lun;
 	struct se_node_acl	*se_lun_nacl;
 	struct se_lun		*se_lun;
@@ -685,7 +678,6 @@
 #define SE_LUN_LINK_MAGIC			0xffff7771
 	u32			lun_link_magic;
 	u32			lun_access;
-	u32			lun_flags;
 	u32			lun_index;
 
 	/* RELATIVE TARGET PORT IDENTIFER */
@@ -738,6 +730,7 @@
 #define DF_EMULATED_VPD_UNIT_SERIAL		0x00000004
 #define DF_USING_UDEV_PATH			0x00000008
 #define DF_USING_ALIAS				0x00000010
+#define DF_READ_ONLY				0x00000020
 	/* Physical device queue depth */
 	u32			queue_depth;
 	/* Used for SPC-2 reservations enforce of ISIDs */
@@ -751,7 +744,6 @@
 	atomic_long_t		write_bytes;
 	/* Active commands on this virtual SE device */
 	atomic_t		simple_cmds;
-	atomic_t		dev_ordered_id;
 	atomic_t		dev_ordered_sync;
 	atomic_t		dev_qf_count;
 	u32			export_count;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 18afef9..7fb2557 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -5,6 +5,19 @@
 	struct module *module;
 	const char *name;
 	size_t node_acl_size;
+	/*
+	 * Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
+	 * Setting this value tells target-core to enforce this limit, and
+	 * report as INQUIRY EVPD=b0 MAXIMUM TRANSFER LENGTH.
+	 *
+	 * target-core will currently reset se_cmd->data_length to this
+	 * maximum size, and set UNDERFLOW residual count if length exceeds
+	 * this limit.
+	 *
+	 * XXX: Not all initiator hosts honor this block-limit EVPD
+	 * XXX: Currently assumes single PAGE_SIZE per scatterlist entry
+	 */
+	u32 max_data_sg_nents;
 	char *(*get_fabric_name)(void);
 	char *(*tpg_get_wwn)(struct se_portal_group *);
 	u16 (*tpg_get_tag)(struct se_portal_group *);
@@ -152,6 +165,7 @@
 void	transport_generic_request_failure(struct se_cmd *, sense_reason_t);
 void	__target_execute_cmd(struct se_cmd *);
 int	transport_lookup_tmr_lun(struct se_cmd *, u64);
+void	core_allocate_nexus_loss_ua(struct se_node_acl *acl);
 
 struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
 		unsigned char *);
diff --git a/include/trace/events/thermal_power_allocator.h b/include/trace/events/thermal_power_allocator.h
index 12e1321..5afae8f 100644
--- a/include/trace/events/thermal_power_allocator.h
+++ b/include/trace/events/thermal_power_allocator.h
@@ -11,7 +11,7 @@
 		 u32 total_req_power, u32 *granted_power,
 		 u32 total_granted_power, size_t num_actors,
 		 u32 power_range, u32 max_allocatable_power,
-		 unsigned long current_temp, s32 delta_temp),
+		 int current_temp, s32 delta_temp),
 	TP_ARGS(tz, req_power, total_req_power, granted_power,
 		total_granted_power, num_actors, power_range,
 		max_allocatable_power, current_temp, delta_temp),
@@ -24,7 +24,7 @@
 		__field(size_t,        num_actors               )
 		__field(u32,           power_range              )
 		__field(u32,           max_allocatable_power    )
-		__field(unsigned long, current_temp             )
+		__field(int,           current_temp             )
 		__field(s32,           delta_temp               )
 	),
 	TP_fast_assign(
@@ -42,7 +42,7 @@
 		__entry->delta_temp = delta_temp;
 	),
 
-	TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%lu delta_temperature=%d",
+	TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d",
 		__entry->tz_id,
 		__print_array(__get_dynamic_array(req_power),
                               __entry->num_actors, 4),
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index e016bd9..ee12400 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -709,15 +709,19 @@
 __SYSCALL(__NR_bpf, sys_bpf)
 #define __NR_execveat 281
 __SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat)
+#define __NR_userfaultfd 282
+__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
+#define __NR_membarrier 283
+__SYSCALL(__NR_membarrier, sys_membarrier)
 
 #undef __NR_syscalls
-#define __NR_syscalls 282
+#define __NR_syscalls 284
 
 /*
  * All syscalls below here should go away really,
  * these are provided for both review and as a porting
  * help for the C library version.
-*
+ *
  * Last chance: are any of these important enough to
  * enable by default?
  */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 70ff1d9..f7b2db4 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -252,6 +252,7 @@
 header-y += media.h
 header-y += media-bus-format.h
 header-y += mei.h
+header-y += membarrier.h
 header-y += memfd.h
 header-y += mempolicy.h
 header-y += meye.h
diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h
index 10f0fa2..9c9c6ad 100644
--- a/include/uapi/linux/atm_zatm.h
+++ b/include/uapi/linux/atm_zatm.h
@@ -35,12 +35,6 @@
 	struct zatm_pool_info info;	/* actual information */
 };
 
-struct zatm_t_hist {
-	struct timeval real;		/* real (wall-clock) time */
-	struct timeval expected;	/* expected real time */
-};
-
-
 #define ZATM_OAM_POOL		0	/* free buffer pool for OAM cells */
 #define ZATM_AAL0_POOL		1	/* free buffer pool for AAL0 cells */
 #define ZATM_AAL5_POOL_BASE	2	/* first AAL5 free buffer pool */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 4ec0b54..564f1f0 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -280,6 +280,13 @@
 	 * Return: TC_ACT_REDIRECT
 	 */
 	BPF_FUNC_redirect,
+
+	/**
+	 * bpf_get_route_realm(skb) - retrieve a dst's tclassid
+	 * @skb: pointer to skb
+	 * Return: realm if != 0
+	 */
+	BPF_FUNC_get_route_realm,
 	__BPF_FUNC_MAX_ID,
 };
 
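
A hypothetical cls_bpf fragment showing the new helper; the prototype is
declared by hand the way samples/bpf does it, which is an assumption and not
part of this patch:

	#define __section(NAME) __attribute__((section(NAME), used))

	static unsigned int (*bpf_get_route_realm)(void *ctx) =
		(void *) BPF_FUNC_get_route_realm;

	__section("classifier")
	int cls_main(struct __sk_buff *skb)
	{
		/* use the dst's tclassid (realm) as the class id, 0 if unset */
		return bpf_get_route_realm(skb);
	}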
diff --git a/include/uapi/linux/if_arcnet.h b/include/uapi/linux/if_arcnet.h
index 46e34bd..cfb642f 100644
--- a/include/uapi/linux/if_arcnet.h
+++ b/include/uapi/linux/if_arcnet.h
@@ -19,7 +19,6 @@
 #include <linux/types.h>
 #include <linux/if_ether.h>
 
-
 /*
  *    These are the defined ARCnet Protocol ID's.
  */
@@ -57,42 +56,40 @@
  * The RFC1201-specific components of an arcnet packet header.
  */
 struct arc_rfc1201 {
-    __u8  proto;		/* protocol ID field - varies		*/
-    __u8  split_flag;	/* for use with split packets		*/
-    __be16   sequence;		/* sequence number			*/
-    __u8  payload[0];	/* space remaining in packet (504 bytes)*/
+	__u8  proto;		/* protocol ID field - varies		*/
+	__u8  split_flag;	/* for use with split packets		*/
+	__be16   sequence;	/* sequence number			*/
+	__u8  payload[0];	/* space remaining in packet (504 bytes)*/
 };
 #define RFC1201_HDR_SIZE 4
 
-
 /*
  * The RFC1051-specific components.
  */
 struct arc_rfc1051 {
-    __u8 proto;		/* ARC_P_RFC1051_ARP/RFC1051_IP	*/
-    __u8 payload[0];		/* 507 bytes			*/
+	__u8 proto;		/* ARC_P_RFC1051_ARP/RFC1051_IP	*/
+	__u8 payload[0];	/* 507 bytes			*/
 };
 #define RFC1051_HDR_SIZE 1
 
-
 /*
  * The ethernet-encap-specific components.  We have a real ethernet header
  * and some data.
  */
 struct arc_eth_encap {
-    __u8 proto;		/* Always ARC_P_ETHER			*/
-    struct ethhdr eth;		/* standard ethernet header (yuck!)	*/
-    __u8 payload[0];		/* 493 bytes				*/
+	__u8 proto;		/* Always ARC_P_ETHER			*/
+	struct ethhdr eth;	/* standard ethernet header (yuck!)	*/
+	__u8 payload[0];	/* 493 bytes				*/
 };
 #define ETH_ENCAP_HDR_SIZE 14
 
-
 struct arc_cap {
 	__u8 proto;
-	__u8 cookie[sizeof(int)];   /* Actually NOT sent over the network */
+	__u8 cookie[sizeof(int)];
+				/* Actually NOT sent over the network */
 	union {
 		__u8 ack;
-		__u8 raw[0];		/* 507 bytes */
+		__u8 raw[0];	/* 507 bytes */
 	} mes;
 };
 
@@ -105,9 +102,9 @@
  * driver.
  */
 struct arc_hardware {
-    __u8  source,		/* source ARCnet - filled in automagically */
-             dest,		/* destination ARCnet - 0 for broadcast    */
-    	     offset[2];		/* offset bytes (some weird semantics)     */
+	__u8 source;		/* source ARCnet - filled in automagically */
+	__u8 dest;		/* destination ARCnet - 0 for broadcast    */
+	__u8 offset[2];		/* offset bytes (some weird semantics)     */
 };
 #define ARC_HDR_SIZE 4
 
@@ -116,17 +113,17 @@
  * when you do a raw packet capture).
  */
 struct archdr {
-    /* hardware requirements */
-    struct arc_hardware hard;
-     
-    /* arcnet encapsulation-specific bits */
-    union {
-	struct arc_rfc1201   rfc1201;
-	struct arc_rfc1051   rfc1051;
-	struct arc_eth_encap eth_encap;
-	struct arc_cap       cap;
-	__u8 raw[0];		/* 508 bytes				*/
-    } soft;
+	/* hardware requirements */
+	struct arc_hardware hard;
+
+	/* arcnet encapsulation-specific bits */
+	union {
+		struct arc_rfc1201   rfc1201;
+		struct arc_rfc1051   rfc1051;
+		struct arc_eth_encap eth_encap;
+		struct arc_cap       cap;
+		__u8 raw[0];	/* 508 bytes				*/
+	} soft;
 };
 
 #endif				/* _LINUX_IF_ARCNET_H */
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 3635b77..18db144 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -127,6 +127,7 @@
 #define BRIDGE_VLAN_INFO_UNTAGGED	(1<<2)	/* VLAN egresses untagged */
 #define BRIDGE_VLAN_INFO_RANGE_BEGIN	(1<<3) /* VLAN is start of vlan range */
 #define BRIDGE_VLAN_INFO_RANGE_END	(1<<4) /* VLAN is end of vlan range */
+#define BRIDGE_VLAN_INFO_BRENTRY	(1<<5) /* Global bridge VLAN entry */
 
 struct bridge_vlan_info {
 	__u16 flags;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 3a5f263..e3b6217 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -232,11 +232,47 @@
 	IFLA_BR_PRIORITY,
 	IFLA_BR_VLAN_FILTERING,
 	IFLA_BR_VLAN_PROTOCOL,
+	IFLA_BR_GROUP_FWD_MASK,
+	IFLA_BR_ROOT_ID,
+	IFLA_BR_BRIDGE_ID,
+	IFLA_BR_ROOT_PORT,
+	IFLA_BR_ROOT_PATH_COST,
+	IFLA_BR_TOPOLOGY_CHANGE,
+	IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
+	IFLA_BR_HELLO_TIMER,
+	IFLA_BR_TCN_TIMER,
+	IFLA_BR_TOPOLOGY_CHANGE_TIMER,
+	IFLA_BR_GC_TIMER,
+	IFLA_BR_GROUP_ADDR,
+	IFLA_BR_FDB_FLUSH,
+	IFLA_BR_MCAST_ROUTER,
+	IFLA_BR_MCAST_SNOOPING,
+	IFLA_BR_MCAST_QUERY_USE_IFADDR,
+	IFLA_BR_MCAST_QUERIER,
+	IFLA_BR_MCAST_HASH_ELASTICITY,
+	IFLA_BR_MCAST_HASH_MAX,
+	IFLA_BR_MCAST_LAST_MEMBER_CNT,
+	IFLA_BR_MCAST_STARTUP_QUERY_CNT,
+	IFLA_BR_MCAST_LAST_MEMBER_INTVL,
+	IFLA_BR_MCAST_MEMBERSHIP_INTVL,
+	IFLA_BR_MCAST_QUERIER_INTVL,
+	IFLA_BR_MCAST_QUERY_INTVL,
+	IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
+	IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
+	IFLA_BR_NF_CALL_IPTABLES,
+	IFLA_BR_NF_CALL_IP6TABLES,
+	IFLA_BR_NF_CALL_ARPTABLES,
+	IFLA_BR_VLAN_DEFAULT_PVID,
 	__IFLA_BR_MAX,
 };
 
 #define IFLA_BR_MAX	(__IFLA_BR_MAX - 1)
 
+struct ifla_bridge_id {
+	__u8	prio[2];
+	__u8	addr[6]; /* ETH_ALEN */
+};
+
 enum {
 	BRIDGE_MODE_UNSPEC,
 	BRIDGE_MODE_HAIRPIN,
@@ -256,6 +292,19 @@
 	IFLA_BRPORT_PROXYARP,	/* proxy ARP */
 	IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */
 	IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */
+	IFLA_BRPORT_ROOT_ID,	/* designated root */
+	IFLA_BRPORT_BRIDGE_ID,	/* designated bridge */
+	IFLA_BRPORT_DESIGNATED_PORT,
+	IFLA_BRPORT_DESIGNATED_COST,
+	IFLA_BRPORT_ID,
+	IFLA_BRPORT_NO,
+	IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
+	IFLA_BRPORT_CONFIG_PENDING,
+	IFLA_BRPORT_MESSAGE_AGE_TIMER,
+	IFLA_BRPORT_FORWARD_DELAY_TIMER,
+	IFLA_BRPORT_HOLD_TIMER,
+	IFLA_BRPORT_FLUSH,
+	IFLA_BRPORT_MULTICAST_ROUTER,
 	__IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index 34141a5..f8b0188 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -21,8 +21,6 @@
 	LWTUNNEL_IP_SRC,
 	LWTUNNEL_IP_TTL,
 	LWTUNNEL_IP_TOS,
-	LWTUNNEL_IP_SPORT,
-	LWTUNNEL_IP_DPORT,
 	LWTUNNEL_IP_FLAGS,
 	__LWTUNNEL_IP_MAX,
 };
@@ -36,8 +34,6 @@
 	LWTUNNEL_IP6_SRC,
 	LWTUNNEL_IP6_HOPLIMIT,
 	LWTUNNEL_IP6_TC,
-	LWTUNNEL_IP6_SPORT,
-	LWTUNNEL_IP6_DPORT,
 	LWTUNNEL_IP6_FLAGS,
 	__LWTUNNEL_IP6_MAX,
 };
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
new file mode 100644
index 0000000..e0b108b
--- /dev/null
+++ b/include/uapi/linux/membarrier.h
@@ -0,0 +1,53 @@
+#ifndef _UAPI_LINUX_MEMBARRIER_H
+#define _UAPI_LINUX_MEMBARRIER_H
+
+/*
+ * linux/membarrier.h
+ *
+ * membarrier system call API
+ *
+ * Copyright (c) 2010, 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * enum membarrier_cmd - membarrier system call command
+ * @MEMBARRIER_CMD_QUERY:   Query the set of supported commands. It returns
+ *                          a bitmask of valid commands.
+ * @MEMBARRIER_CMD_SHARED:  Execute a memory barrier on all running threads.
+ *                          Upon return from system call, the caller thread
+ *                          is ensured that all running threads have passed
+ *                          through a state where all memory accesses to
+ *                          user-space addresses match program order between
+ *                          entry to and return from the system call
+ *                          (non-running threads are de facto in such a
+ *                          state). This covers threads from all processes
+ *                          running on the system. This command returns 0.
+ *
+ * Command to be passed to the membarrier system call. The commands need to
+ * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
+ * the value 0.
+ */
+enum membarrier_cmd {
+	MEMBARRIER_CMD_QUERY = 0,
+	MEMBARRIER_CMD_SHARED = (1 << 0),
+};
+
+#endif /* _UAPI_LINUX_MEMBARRIER_H */
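
A minimal user-space sketch of how the interface above might be probed and
used; the raw syscall(2) wrapper and the __NR_membarrier constant are
assumptions about the installed headers, not part of this patch:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

/* glibc has no wrapper for this call, so issue the syscall directly
 * (assumes __NR_membarrier is provided by the kernel headers). */
static int membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	int mask = membarrier(MEMBARRIER_CMD_QUERY, 0);

	if (mask < 0)
		return 1;	/* e.g. ENOSYS without CONFIG_MEMBARRIER */

	if (mask & MEMBARRIER_CMD_SHARED)
		membarrier(MEMBARRIER_CMD_SHARED, 0);	/* system-wide barrier */

	return 0;
}
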
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 6f3fe16..f095155 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -54,6 +54,7 @@
 #define NLM_F_ACK		4	/* Reply with ack, with zero or error code */
 #define NLM_F_ECHO		8	/* Echo this request 		*/
 #define NLM_F_DUMP_INTR		16	/* Dump was inconsistent due to sequence change */
+#define NLM_F_DUMP_FILTERED	32	/* Dump was filtered as requested */
 
 /* Modifiers to GET request */
 #define NLM_F_ROOT	0x100	/* specify tree	root	*/
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 32e07d8..4036e1b 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -349,6 +349,8 @@
 	OVS_TUNNEL_KEY_ATTR_TP_SRC,		/* be16 src Transport Port. */
 	OVS_TUNNEL_KEY_ATTR_TP_DST,		/* be16 dst Transport Port. */
 	OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS,		/* Nested OVS_VXLAN_EXT_* */
+	OVS_TUNNEL_KEY_ATTR_IPV6_SRC,		/* struct in6_addr src IPv6 address. */
+	OVS_TUNNEL_KEY_ATTR_IPV6_DST,		/* struct in6_addr dst IPv6 address. */
 	__OVS_TUNNEL_KEY_ATTR_MAX
 };
 
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index b67f99d..95c6521 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -42,10 +42,6 @@
 #define TCMU_MAILBOX_VERSION 2
 #define ALIGN_SIZE 64 /* Should be enough for most CPUs */
 
-/* See https://gcc.gnu.org/onlinedocs/cpp/Stringification.html */
-#define xstr(s) str(s)
-#define str(s) #s
-
 struct tcmu_mailbox {
 	__u16 version;
 	__u16 flags;
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index df0e09b..9057d7a 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -11,8 +11,6 @@
 
 #include <linux/types.h>
 
-#include <linux/compiler.h>
-
 #define UFFD_API ((__u64)0xAA)
 /*
  * After implementing the respective features it will become:
diff --git a/init/Kconfig b/init/Kconfig
index 02da9f1..c24b6f7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1602,6 +1602,18 @@
 	  bugs/quirks. Disable this only if your target machine is
 	  unaffected by PCI quirks.
 
+config MEMBARRIER
+	bool "Enable membarrier() system call" if EXPERT
+	default y
+	help
+	  Enable the membarrier() system call that allows issuing memory
+	  barriers across all running threads, which can be used to distribute
+	  the cost of user-space memory barriers asymmetrically by transforming
+	  pairs of memory barriers into pairs consisting of membarrier() and a
+	  compiler barrier.
+
+	  If unsure, say Y.
+
 config EMBEDDED
 	bool "Embedded system"
 	option allnoconfig_y
diff --git a/ipc/msg.c b/ipc/msg.c
index 66c4f56..1471db9 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -137,13 +137,6 @@
 		return retval;
 	}
 
-	/* ipc_addid() locks msq upon success. */
-	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
-	if (id < 0) {
-		ipc_rcu_putref(msq, msg_rcu_free);
-		return id;
-	}
-
 	msq->q_stime = msq->q_rtime = 0;
 	msq->q_ctime = get_seconds();
 	msq->q_cbytes = msq->q_qnum = 0;
@@ -153,6 +146,13 @@
 	INIT_LIST_HEAD(&msq->q_receivers);
 	INIT_LIST_HEAD(&msq->q_senders);
 
+	/* ipc_addid() locks msq upon success. */
+	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
+	if (id < 0) {
+		ipc_rcu_putref(msq, msg_rcu_free);
+		return id;
+	}
+
 	ipc_unlock_object(&msq->q_perm);
 	rcu_read_unlock();
 
diff --git a/ipc/shm.c b/ipc/shm.c
index 222131e..4178727 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -551,12 +551,6 @@
 	if (IS_ERR(file))
 		goto no_file;
 
-	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
-	if (id < 0) {
-		error = id;
-		goto no_id;
-	}
-
 	shp->shm_cprid = task_tgid_vnr(current);
 	shp->shm_lprid = 0;
 	shp->shm_atim = shp->shm_dtim = 0;
@@ -565,6 +559,13 @@
 	shp->shm_nattch = 0;
 	shp->shm_file = file;
 	shp->shm_creator = current;
+
+	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
+	if (id < 0) {
+		error = id;
+		goto no_id;
+	}
+
 	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
 
 	/*
diff --git a/ipc/util.c b/ipc/util.c
index be42300..0f401d9 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -237,6 +237,10 @@
 	rcu_read_lock();
 	spin_lock(&new->lock);
 
+	current_euid_egid(&euid, &egid);
+	new->cuid = new->uid = euid;
+	new->gid = new->cgid = egid;
+
 	id = idr_alloc(&ids->ipcs_idr, new,
 		       (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
 		       GFP_NOWAIT);
@@ -249,10 +253,6 @@
 
 	ids->in_use++;
 
-	current_euid_egid(&euid, &egid);
-	new->cuid = new->uid = euid;
-	new->gid = new->cgid = egid;
-
 	if (next_id < 0) {
 		new->seq = ids->seq++;
 		if (ids->seq > IPCID_SEQ_MAX)
diff --git a/kernel/Makefile b/kernel/Makefile
index d498841..53abf00 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -100,6 +100,7 @@
 obj-$(CONFIG_JUMP_LABEL) += jump_label.o
 obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
 obj-$(CONFIG_TORTURE_TEST) += torture.o
+obj-$(CONFIG_MEMBARRIER) += membarrier.o
 
 obj-$(CONFIG_HAS_IOMEM) += memremap.o
 
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 29ace10..f2d9e69 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/filter.h>
+#include <linux/perf_event.h>
 
 /* Called from syscall */
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
@@ -48,7 +49,7 @@
 	array->map.key_size = attr->key_size;
 	array->map.value_size = attr->value_size;
 	array->map.max_entries = attr->max_entries;
-
+	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 	array->elem_size = elem_size;
 
 	return &array->map;
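
The new map.pages value feeds the RLIMIT_MEMLOCK accounting added to
kernel/bpf/syscall.c later in this series. A standalone sketch of the same
arithmetic, using made-up map sizes, shows how a map's footprint is turned
into pages and compared against the limit:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
/* gives the same result as the kernel's round_up() for these sizes */
#define round_up(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	/* assumed example map: 8-byte elements, one million entries */
	unsigned long elem_size   = 8;
	unsigned long max_entries = 1000000;
	unsigned long array_size  = elem_size * max_entries;

	/* mirrors the array->map.pages computation in the hunk above */
	unsigned long pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	/* a default RLIMIT_MEMLOCK of 64 KiB corresponds to this many pages */
	unsigned long limit_pages = (64UL * 1024) >> PAGE_SHIFT;

	printf("map charges %lu pages, memlock limit is %lu pages\n",
	       pages, limit_pages);
	/* the syscall path rejects the map with -EPERM once the per-user
	 * locked_vm total exceeds the limit */
	return 0;
}
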
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 67c380c..8086471 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -82,6 +82,8 @@
 	if (fp == NULL)
 		return NULL;
 
+	kmemcheck_annotate_bitfield(fp, meta);
+
 	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
 	if (aux == NULL) {
 		vfree(fp);
@@ -110,6 +112,8 @@
 
 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
 	if (fp != NULL) {
+		kmemcheck_annotate_bitfield(fp, meta);
+
 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
 		fp->pages = size / PAGE_SIZE;
 
@@ -727,6 +731,32 @@
 }
 EXPORT_SYMBOL_GPL(bpf_prog_free);
 
+/* RNG for unprivileged user space with separated state from prandom_u32(). */
+static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
+
+void bpf_user_rnd_init_once(void)
+{
+	prandom_init_once(&bpf_user_rnd_state);
+}
+
+u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	/* Should someone ever have the rather unwise idea to use some
+	 * of the registers passed into this function, then note that
+	 * this function is called from native eBPF and classic-to-eBPF
+	 * transformations. Register assignments from both sides are
+	 * different, f.e. classic always sets fn(ctx, A, X) here.
+	 */
+	struct rnd_state *state;
+	u32 res;
+
+	state = &get_cpu_var(bpf_user_rnd_state);
+	res = prandom_u32_state(state);
+	put_cpu_var(state);
+
+	return res;
+}
+
 /* Weak definitions of helper functions in case we don't have bpf syscall. */
 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 83c209d..28592d7 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -88,6 +88,10 @@
 	htab->elem_size = sizeof(struct htab_elem) +
 			  round_up(htab->map.key_size, 8) +
 			  htab->map.value_size;
+
+	htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
+				   htab->elem_size * htab->map.max_entries,
+				   PAGE_SIZE) >> PAGE_SHIFT;
 	return &htab->map;
 
 free_htab:
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 1447ec0..4504ca6 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -93,13 +93,8 @@
 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
 };
 
-static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
-{
-	return prandom_u32();
-}
-
 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
-	.func		= bpf_get_prandom_u32,
+	.func		= bpf_user_rnd_u32,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 };
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 35bac8e..f640e5f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -18,6 +18,8 @@
 #include <linux/filter.h>
 #include <linux/version.h>
 
+int sysctl_unprivileged_bpf_disabled __read_mostly;
+
 static LIST_HEAD(bpf_map_types);
 
 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
@@ -44,11 +46,38 @@
 	list_add(&tl->list_node, &bpf_map_types);
 }
 
+static int bpf_map_charge_memlock(struct bpf_map *map)
+{
+	struct user_struct *user = get_current_user();
+	unsigned long memlock_limit;
+
+	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+	atomic_long_add(map->pages, &user->locked_vm);
+
+	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
+		atomic_long_sub(map->pages, &user->locked_vm);
+		free_uid(user);
+		return -EPERM;
+	}
+	map->user = user;
+	return 0;
+}
+
+static void bpf_map_uncharge_memlock(struct bpf_map *map)
+{
+	struct user_struct *user = map->user;
+
+	atomic_long_sub(map->pages, &user->locked_vm);
+	free_uid(user);
+}
+
 /* called from workqueue */
 static void bpf_map_free_deferred(struct work_struct *work)
 {
 	struct bpf_map *map = container_of(work, struct bpf_map, work);
 
+	bpf_map_uncharge_memlock(map);
 	/* implementation dependent freeing */
 	map->ops->map_free(map);
 }
@@ -108,6 +137,10 @@
 
 	atomic_set(&map->refcnt, 1);
 
+	err = bpf_map_charge_memlock(map);
+	if (err)
+		goto free_map;
+
 	err = anon_inode_getfd("bpf-map", &bpf_map_fops, map, O_RDWR | O_CLOEXEC);
 
 	if (err < 0)
@@ -402,6 +435,10 @@
 			 */
 			BUG_ON(!prog->aux->ops->get_func_proto);
 
+			if (insn->imm == BPF_FUNC_get_route_realm)
+				prog->dst_needed = 1;
+			if (insn->imm == BPF_FUNC_get_prandom_u32)
+				bpf_user_rnd_init_once();
 			if (insn->imm == BPF_FUNC_tail_call) {
 				/* mark bpf_tail_call as different opcode
 				 * to avoid conditional branch in
@@ -436,11 +473,37 @@
 	kfree(aux->used_maps);
 }
 
+static int bpf_prog_charge_memlock(struct bpf_prog *prog)
+{
+	struct user_struct *user = get_current_user();
+	unsigned long memlock_limit;
+
+	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+	atomic_long_add(prog->pages, &user->locked_vm);
+	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
+		atomic_long_sub(prog->pages, &user->locked_vm);
+		free_uid(user);
+		return -EPERM;
+	}
+	prog->aux->user = user;
+	return 0;
+}
+
+static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
+{
+	struct user_struct *user = prog->aux->user;
+
+	atomic_long_sub(prog->pages, &user->locked_vm);
+	free_uid(user);
+}
+
 static void __prog_put_rcu(struct rcu_head *rcu)
 {
 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
 
 	free_used_maps(aux);
+	bpf_prog_uncharge_memlock(aux->prog);
 	bpf_prog_free(aux->prog);
 }
 
@@ -540,11 +603,18 @@
 	    attr->kern_version != LINUX_VERSION_CODE)
 		return -EINVAL;
 
+	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
 	/* plain bpf_prog allocation */
 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
 	if (!prog)
 		return -ENOMEM;
 
+	err = bpf_prog_charge_memlock(prog);
+	if (err)
+		goto free_prog_nouncharge;
+
 	prog->len = attr->insn_cnt;
 
 	err = -EFAULT;
@@ -553,10 +623,10 @@
 		goto free_prog;
 
 	prog->orig_prog = NULL;
-	prog->jited = false;
+	prog->jited = 0;
 
 	atomic_set(&prog->aux->refcnt, 1);
-	prog->gpl_compatible = is_gpl;
+	prog->gpl_compatible = is_gpl ? 1 : 0;
 
 	/* find program type: socket_filter vs tracing_filter */
 	err = find_prog_type(type, prog);
@@ -586,6 +656,8 @@
 free_used_maps:
 	free_used_maps(prog->aux);
 free_prog:
+	bpf_prog_uncharge_memlock(prog);
+free_prog_nouncharge:
 	bpf_prog_free(prog);
 	return err;
 }
@@ -595,11 +667,7 @@
 	union bpf_attr attr = {};
 	int err;
 
-	/* the syscall is limited to root temporarily. This restriction will be
-	 * lifted when security audit is clean. Note that eBPF+tracing must have
-	 * this restriction, since it may pass kernel data to user space
-	 */
-	if (!capable(CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
 		return -EPERM;
 
 	if (!access_ok(VERIFY_READ, uattr, 1))
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b074b23..1d6b97b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -199,6 +199,7 @@
 	struct verifier_state_list **explored_states; /* search pruning optimization */
 	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
 	u32 used_map_cnt;		/* number of used maps */
+	bool allow_ptr_leaks;
 };
 
 /* verbose verifier prints what it's seeing
@@ -538,6 +539,21 @@
 		return -EINVAL;
 }
 
+static bool is_spillable_regtype(enum bpf_reg_type type)
+{
+	switch (type) {
+	case PTR_TO_MAP_VALUE:
+	case PTR_TO_MAP_VALUE_OR_NULL:
+	case PTR_TO_STACK:
+	case PTR_TO_CTX:
+	case FRAME_PTR:
+	case CONST_PTR_TO_MAP:
+		return true;
+	default:
+		return false;
+	}
+}
+
 /* check_stack_read/write functions track spill/fill of registers,
  * stack boundary and alignment are checked in check_mem_access()
  */
@@ -550,9 +566,7 @@
 	 */
 
 	if (value_regno >= 0 &&
-	    (state->regs[value_regno].type == PTR_TO_MAP_VALUE ||
-	     state->regs[value_regno].type == PTR_TO_STACK ||
-	     state->regs[value_regno].type == PTR_TO_CTX)) {
+	    is_spillable_regtype(state->regs[value_regno].type)) {
 
 		/* register containing pointer is being spilled into stack */
 		if (size != BPF_REG_SIZE) {
@@ -643,6 +657,20 @@
 	return -EACCES;
 }
 
+static bool is_pointer_value(struct verifier_env *env, int regno)
+{
+	if (env->allow_ptr_leaks)
+		return false;
+
+	switch (env->cur_state.regs[regno].type) {
+	case UNKNOWN_VALUE:
+	case CONST_IMM:
+		return false;
+	default:
+		return true;
+	}
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -669,11 +697,21 @@
 	}
 
 	if (state->regs[regno].type == PTR_TO_MAP_VALUE) {
+		if (t == BPF_WRITE && value_regno >= 0 &&
+		    is_pointer_value(env, value_regno)) {
+			verbose("R%d leaks addr into map\n", value_regno);
+			return -EACCES;
+		}
 		err = check_map_access(env, regno, off, size);
 		if (!err && t == BPF_READ && value_regno >= 0)
 			mark_reg_unknown_value(state->regs, value_regno);
 
 	} else if (state->regs[regno].type == PTR_TO_CTX) {
+		if (t == BPF_WRITE && value_regno >= 0 &&
+		    is_pointer_value(env, value_regno)) {
+			verbose("R%d leaks addr into ctx\n", value_regno);
+			return -EACCES;
+		}
 		err = check_ctx_access(env, off, size, t);
 		if (!err && t == BPF_READ && value_regno >= 0)
 			mark_reg_unknown_value(state->regs, value_regno);
@@ -684,10 +722,17 @@
 			verbose("invalid stack off=%d size=%d\n", off, size);
 			return -EACCES;
 		}
-		if (t == BPF_WRITE)
+		if (t == BPF_WRITE) {
+			if (!env->allow_ptr_leaks &&
+			    state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
+			    size != BPF_REG_SIZE) {
+				verbose("attempt to corrupt spilled pointer on stack\n");
+				return -EACCES;
+			}
 			err = check_stack_write(state, off, size, value_regno);
-		else
+		} else {
 			err = check_stack_read(state, off, size, value_regno);
+		}
 	} else {
 		verbose("R%d invalid mem access '%s'\n",
 			regno, reg_type_str[state->regs[regno].type]);
@@ -775,8 +820,13 @@
 		return -EACCES;
 	}
 
-	if (arg_type == ARG_ANYTHING)
+	if (arg_type == ARG_ANYTHING) {
+		if (is_pointer_value(env, regno)) {
+			verbose("R%d leaks addr into helper function\n", regno);
+			return -EACCES;
+		}
 		return 0;
+	}
 
 	if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
 	    arg_type == ARG_PTR_TO_MAP_VALUE) {
@@ -950,8 +1000,9 @@
 }
 
 /* check validity of 32-bit and 64-bit arithmetic operations */
-static int check_alu_op(struct reg_state *regs, struct bpf_insn *insn)
+static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
 {
+	struct reg_state *regs = env->cur_state.regs;
 	u8 opcode = BPF_OP(insn->code);
 	int err;
 
@@ -976,6 +1027,12 @@
 		if (err)
 			return err;
 
+		if (is_pointer_value(env, insn->dst_reg)) {
+			verbose("R%d pointer arithmetic prohibited\n",
+				insn->dst_reg);
+			return -EACCES;
+		}
+
 		/* check dest operand */
 		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
 		if (err)
@@ -1012,6 +1069,11 @@
 				 */
 				regs[insn->dst_reg] = regs[insn->src_reg];
 			} else {
+				if (is_pointer_value(env, insn->src_reg)) {
+					verbose("R%d partial copy of pointer\n",
+						insn->src_reg);
+					return -EACCES;
+				}
 				regs[insn->dst_reg].type = UNKNOWN_VALUE;
 				regs[insn->dst_reg].map_ptr = NULL;
 			}
@@ -1061,8 +1123,18 @@
 		/* pattern match 'bpf_add Rx, imm' instruction */
 		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
 		    regs[insn->dst_reg].type == FRAME_PTR &&
-		    BPF_SRC(insn->code) == BPF_K)
+		    BPF_SRC(insn->code) == BPF_K) {
 			stack_relative = true;
+		} else if (is_pointer_value(env, insn->dst_reg)) {
+			verbose("R%d pointer arithmetic prohibited\n",
+				insn->dst_reg);
+			return -EACCES;
+		} else if (BPF_SRC(insn->code) == BPF_X &&
+			   is_pointer_value(env, insn->src_reg)) {
+			verbose("R%d pointer arithmetic prohibited\n",
+				insn->src_reg);
+			return -EACCES;
+		}
 
 		/* check dest operand */
 		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
@@ -1101,6 +1173,12 @@
 		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
 		if (err)
 			return err;
+
+		if (is_pointer_value(env, insn->src_reg)) {
+			verbose("R%d pointer comparison prohibited\n",
+				insn->src_reg);
+			return -EACCES;
+		}
 	} else {
 		if (insn->src_reg != BPF_REG_0) {
 			verbose("BPF_JMP uses reserved fields\n");
@@ -1155,6 +1233,9 @@
 			regs[insn->dst_reg].type = CONST_IMM;
 			regs[insn->dst_reg].imm = 0;
 		}
+	} else if (is_pointer_value(env, insn->dst_reg)) {
+		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
+		return -EACCES;
 	} else if (BPF_SRC(insn->code) == BPF_K &&
 		   (opcode == BPF_JEQ || opcode == BPF_JNE)) {
 
@@ -1658,7 +1739,7 @@
 		}
 
 		if (class == BPF_ALU || class == BPF_ALU64) {
-			err = check_alu_op(regs, insn);
+			err = check_alu_op(env, insn);
 			if (err)
 				return err;
 
@@ -1816,6 +1897,11 @@
 				if (err)
 					return err;
 
+				if (is_pointer_value(env, BPF_REG_0)) {
+					verbose("R0 leaks addr as return value\n");
+					return -EACCES;
+				}
+
 process_bpf_exit:
 				insn_idx = pop_stack(env, &prev_insn_idx);
 				if (insn_idx < 0) {
@@ -2024,7 +2110,7 @@
 
 		cnt = env->prog->aux->ops->
 			convert_ctx_access(type, insn->dst_reg, insn->src_reg,
-					   insn->off, insn_buf);
+					   insn->off, insn_buf, env->prog);
 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
 			verbose("bpf verifier is misconfigured\n");
 			return -EINVAL;
@@ -2144,6 +2230,8 @@
 	if (ret < 0)
 		goto skip_full_check;
 
+	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
+
 	ret = do_check(env);
 
 skip_full_check:
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2cf0f79..2c9eae6 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -46,7 +46,6 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/rwsem.h>
-#include <linux/percpu-rwsem.h>
 #include <linux/string.h>
 #include <linux/sort.h>
 #include <linux/kmod.h>
@@ -104,8 +103,6 @@
  */
 static DEFINE_SPINLOCK(release_agent_path_lock);
 
-struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
-
 #define cgroup_assert_mutex_or_rcu_locked()				\
 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
 			   !lockdep_is_held(&cgroup_mutex),		\
@@ -874,6 +871,48 @@
 	return cset;
 }
 
+void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+	down_read(&tsk->signal->group_rwsem);
+}
+
+void cgroup_threadgroup_change_end(struct task_struct *tsk)
+{
+	up_read(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_lock - lock threadgroup
+ * @tsk: member task of the threadgroup to lock
+ *
+ * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
+ * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
+ * change ->group_leader/pid.  This is useful for cases where the threadgroup
+ * needs to stay stable across blockable operations.
+ *
+ * fork and exit explicitly call threadgroup_change_{begin|end}() for
+ * synchronization.  While held, no new task will be added to threadgroup
+ * and no existing live task will have its PF_EXITING set.
+ *
+ * de_thread() does threadgroup_change_{begin|end}() when a non-leader
+ * sub-thread becomes a new leader.
+ */
+static void threadgroup_lock(struct task_struct *tsk)
+{
+	down_write(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_unlock - unlock threadgroup
+ * @tsk: member task of the threadgroup to unlock
+ *
+ * Reverse threadgroup_lock().
+ */
+static inline void threadgroup_unlock(struct task_struct *tsk)
+{
+	up_write(&tsk->signal->group_rwsem);
+}
+
 static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
 {
 	struct cgroup *root_cgrp = kf_root->kn->priv;
@@ -2074,9 +2113,9 @@
 	lockdep_assert_held(&css_set_rwsem);
 
 	/*
-	 * We are synchronized through cgroup_threadgroup_rwsem against
-	 * PF_EXITING setting such that we can't race against cgroup_exit()
-	 * changing the css_set to init_css_set and dropping the old one.
+	 * We are synchronized through threadgroup_lock() against PF_EXITING
+	 * setting such that we can't race against cgroup_exit() changing the
+	 * css_set to init_css_set and dropping the old one.
 	 */
 	WARN_ON_ONCE(tsk->flags & PF_EXITING);
 	old_cset = task_css_set(tsk);
@@ -2133,11 +2172,10 @@
  * @src_cset and add it to @preloaded_csets, which should later be cleaned
  * up by cgroup_migrate_finish().
  *
- * This function may be called without holding cgroup_threadgroup_rwsem
- * even if the target is a process.  Threads may be created and destroyed
- * but as long as cgroup_mutex is not dropped, no new css_set can be put
- * into play and the preloaded css_sets are guaranteed to cover all
- * migrations.
+ * This function may be called without holding threadgroup_lock even if the
+ * target is a process.  Threads may be created and destroyed but as long
+ * as cgroup_mutex is not dropped, no new css_set can be put into play and
+ * the preloaded css_sets are guaranteed to cover all migrations.
  */
 static void cgroup_migrate_add_src(struct css_set *src_cset,
 				   struct cgroup *dst_cgrp,
@@ -2240,7 +2278,7 @@
  * @threadgroup: whether @leader points to the whole process or a single task
  *
  * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
- * process, the caller must be holding cgroup_threadgroup_rwsem.  The
+ * process, the caller must be holding threadgroup_lock of @leader.  The
  * caller is also responsible for invoking cgroup_migrate_add_src() and
  * cgroup_migrate_prepare_dst() on the targets before invoking this
  * function and following up with cgroup_migrate_finish().
@@ -2368,7 +2406,7 @@
  * @leader: the task or the leader of the threadgroup to be attached
  * @threadgroup: attach the whole threadgroup?
  *
- * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
+ * Call holding cgroup_mutex and threadgroup_lock of @leader.
  */
 static int cgroup_attach_task(struct cgroup *dst_cgrp,
 			      struct task_struct *leader, bool threadgroup)
@@ -2460,13 +2498,14 @@
 	if (!cgrp)
 		return -ENODEV;
 
-	percpu_down_write(&cgroup_threadgroup_rwsem);
+retry_find_task:
 	rcu_read_lock();
 	if (pid) {
 		tsk = find_task_by_vpid(pid);
 		if (!tsk) {
+			rcu_read_unlock();
 			ret = -ESRCH;
-			goto out_unlock_rcu;
+			goto out_unlock_cgroup;
 		}
 	} else {
 		tsk = current;
@@ -2482,23 +2521,37 @@
 	 */
 	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
 		ret = -EINVAL;
-		goto out_unlock_rcu;
+		rcu_read_unlock();
+		goto out_unlock_cgroup;
 	}
 
 	get_task_struct(tsk);
 	rcu_read_unlock();
 
+	threadgroup_lock(tsk);
+	if (threadgroup) {
+		if (!thread_group_leader(tsk)) {
+			/*
+			 * a race with de_thread from another thread's exec()
+			 * may strip us of our leadership; if this happens,
+			 * there is no choice but to throw this task away and
+			 * try again; this is
+			 * "double-double-toil-and-trouble-check locking".
+			 */
+			threadgroup_unlock(tsk);
+			put_task_struct(tsk);
+			goto retry_find_task;
+		}
+	}
+
 	ret = cgroup_procs_write_permission(tsk, cgrp, of);
 	if (!ret)
 		ret = cgroup_attach_task(cgrp, tsk, threadgroup);
 
-	put_task_struct(tsk);
-	goto out_unlock_threadgroup;
+	threadgroup_unlock(tsk);
 
-out_unlock_rcu:
-	rcu_read_unlock();
-out_unlock_threadgroup:
-	percpu_up_write(&cgroup_threadgroup_rwsem);
+	put_task_struct(tsk);
+out_unlock_cgroup:
 	cgroup_kn_unlock(of->kn);
 	return ret ?: nbytes;
 }
@@ -2643,8 +2696,6 @@
 
 	lockdep_assert_held(&cgroup_mutex);
 
-	percpu_down_write(&cgroup_threadgroup_rwsem);
-
 	/* look up all csses currently attached to @cgrp's subtree */
 	down_read(&css_set_rwsem);
 	css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
@@ -2700,8 +2751,17 @@
 				goto out_finish;
 			last_task = task;
 
+			threadgroup_lock(task);
+			/* raced against de_thread() from another thread? */
+			if (!thread_group_leader(task)) {
+				threadgroup_unlock(task);
+				put_task_struct(task);
+				continue;
+			}
+
 			ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
 
+			threadgroup_unlock(task);
 			put_task_struct(task);
 
 			if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
@@ -2711,7 +2771,6 @@
 
 out_finish:
 	cgroup_migrate_finish(&preloaded_csets);
-	percpu_up_write(&cgroup_threadgroup_rwsem);
 	return ret;
 }
 
@@ -5024,7 +5083,6 @@
 	unsigned long key;
 	int ssid, err;
 
-	BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
 
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index 9656a3c..009cc9a 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -180,7 +180,7 @@
  * low power state that may have caused some blocks in the same power domain
  * to reset.
  *
- * Must be called after cpu_pm_exit has been called on all cpus in the power
+ * Must be called after cpu_cluster_pm_enter has been called for the power
  * domain, and before cpu_pm_exit has been called on any cpu in the power
  * domain. Notified drivers can include VFP co-processor, interrupt controller
  * and its PM extensions, local CPU timers context save/restore which
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f548f69..b11756f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1243,11 +1243,7 @@
 					      PERF_EVENT_STATE_INACTIVE;
 }
 
-/*
- * Called at perf_event creation and when events are attached/detached from a
- * group.
- */
-static void perf_event__read_size(struct perf_event *event)
+static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
 {
 	int entry = sizeof(u64); /* value */
 	int size = 0;
@@ -1263,7 +1259,7 @@
 		entry += sizeof(u64);
 
 	if (event->attr.read_format & PERF_FORMAT_GROUP) {
-		nr += event->group_leader->nr_siblings;
+		nr += nr_siblings;
 		size += sizeof(u64);
 	}
 
@@ -1271,14 +1267,11 @@
 	event->read_size = size;
 }
 
-static void perf_event__header_size(struct perf_event *event)
+static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
 {
 	struct perf_sample_data *data;
-	u64 sample_type = event->attr.sample_type;
 	u16 size = 0;
 
-	perf_event__read_size(event);
-
 	if (sample_type & PERF_SAMPLE_IP)
 		size += sizeof(data->ip);
 
@@ -1303,6 +1296,17 @@
 	event->header_size = size;
 }
 
+/*
+ * Called at perf_event creation and when events are attached/detached from a
+ * group.
+ */
+static void perf_event__header_size(struct perf_event *event)
+{
+	__perf_event_read_size(event,
+			       event->group_leader->nr_siblings);
+	__perf_event_header_size(event, event->attr.sample_type);
+}
+
 static void perf_event__id_header_size(struct perf_event *event)
 {
 	struct perf_sample_data *data;
@@ -1330,6 +1334,27 @@
 	event->id_header_size = size;
 }
 
+static bool perf_event_validate_size(struct perf_event *event)
+{
+	/*
+	 * The values computed here will be over-written when we actually
+	 * attach the event.
+	 */
+	__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
+	__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
+	perf_event__id_header_size(event);
+
+	/*
+	 * Sum the lot; should not exceed the 64k limit we have on records.
+	 * Conservative limit to allow for callchains and other variable fields.
+	 */
+	if (event->read_size + event->header_size +
+	    event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
+		return false;
+
+	return true;
+}
+
 static void perf_group_attach(struct perf_event *event)
 {
 	struct perf_event *group_leader = event->group_leader, *pos;
@@ -8297,13 +8322,35 @@
 
 	if (move_group) {
 		gctx = group_leader->ctx;
+		mutex_lock_double(&gctx->mutex, &ctx->mutex);
+	} else {
+		mutex_lock(&ctx->mutex);
+	}
 
+	if (!perf_event_validate_size(event)) {
+		err = -E2BIG;
+		goto err_locked;
+	}
+
+	/*
+	 * Must be under the same ctx::mutex as perf_install_in_context(),
+	 * because we need to serialize with concurrent event creation.
+	 */
+	if (!exclusive_event_installable(event, ctx)) {
+		/* exclusive and group stuff are assumed mutually exclusive */
+		WARN_ON_ONCE(move_group);
+
+		err = -EBUSY;
+		goto err_locked;
+	}
+
+	WARN_ON_ONCE(ctx->parent_ctx);
+
+	if (move_group) {
 		/*
 		 * See perf_event_ctx_lock() for comments on the details
 		 * of swizzling perf_event::ctx.
 		 */
-		mutex_lock_double(&gctx->mutex, &ctx->mutex);
-
 		perf_remove_from_context(group_leader, false);
 
 		list_for_each_entry(sibling, &group_leader->sibling_list,
@@ -8311,13 +8358,7 @@
 			perf_remove_from_context(sibling, false);
 			put_ctx(gctx);
 		}
-	} else {
-		mutex_lock(&ctx->mutex);
-	}
 
-	WARN_ON_ONCE(ctx->parent_ctx);
-
-	if (move_group) {
 		/*
 		 * Wait for everybody to stop referencing the events through
 		 * the old lists, before installing it on new lists.
@@ -8349,22 +8390,29 @@
 		perf_event__state_init(group_leader);
 		perf_install_in_context(ctx, group_leader, group_leader->cpu);
 		get_ctx(ctx);
+
+		/*
+		 * Now that all events are installed in @ctx, nothing
+		 * references @gctx anymore, so drop the last reference we have
+		 * on it.
+		 */
+		put_ctx(gctx);
 	}
 
-	if (!exclusive_event_installable(event, ctx)) {
-		err = -EBUSY;
-		mutex_unlock(&ctx->mutex);
-		fput(event_file);
-		goto err_context;
-	}
+	/*
+	 * Precalculate sample_data sizes; do while holding ctx::mutex such
+	 * that we're serialized against further additions and before
+	 * perf_install_in_context(), which is the point where the event
+	 * becomes active and can use these values.
+	 */
+	perf_event__header_size(event);
+	perf_event__id_header_size(event);
 
 	perf_install_in_context(ctx, event, event->cpu);
 	perf_unpin_context(ctx);
 
-	if (move_group) {
+	if (move_group)
 		mutex_unlock(&gctx->mutex);
-		put_ctx(gctx);
-	}
 	mutex_unlock(&ctx->mutex);
 
 	put_online_cpus();
@@ -8376,12 +8424,6 @@
 	mutex_unlock(&current->perf_event_mutex);
 
 	/*
-	 * Precalculate sample_data sizes
-	 */
-	perf_event__header_size(event);
-	perf_event__id_header_size(event);
-
-	/*
 	 * Drop the reference on the group_event after placing the
 	 * new event on the sibling_list. This ensures destruction
 	 * of the group leader will find the pointer to itself in
@@ -8391,6 +8433,12 @@
 	fd_install(event_fd, event_file);
 	return event_fd;
 
+err_locked:
+	if (move_group)
+		mutex_unlock(&gctx->mutex);
+	mutex_unlock(&ctx->mutex);
+/* err_file: */
+	fput(event_file);
 err_context:
 	perf_unpin_context(ctx);
 	put_ctx(ctx);
diff --git a/kernel/fork.c b/kernel/fork.c
index 7d5f0f1..2845623 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1149,6 +1149,10 @@
 	tty_audit_fork(sig);
 	sched_autogroup_fork(sig);
 
+#ifdef CONFIG_CGROUPS
+	init_rwsem(&sig->group_rwsem);
+#endif
+
 	sig->oom_score_adj = current->signal->oom_score_adj;
 	sig->oom_score_adj_min = current->signal->oom_score_adj_min;
 
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6e40a95..e28169d 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -83,7 +83,7 @@
 
 	if (!desc)
 		return -EINVAL;
-	desc->irq_data.handler_data = data;
+	desc->irq_common_data.handler_data = data;
 	irq_put_desc_unlock(desc, flags);
 	return 0;
 }
@@ -105,7 +105,7 @@
 
 	if (!desc)
 		return -EINVAL;
-	desc->irq_data.msi_desc = entry;
+	desc->irq_common_data.msi_desc = entry;
 	if (entry && !irq_offset)
 		entry->irq = irq_base;
 	irq_put_desc_unlock(desc, flags);
@@ -372,7 +372,6 @@
 
 /**
  *	handle_simple_irq - Simple and software-decoded IRQs.
- *	@irq:	the interrupt number
  *	@desc:	the interrupt description structure for this irq
  *
  *	Simple interrupts are either sent from a demultiplexing interrupt
@@ -382,8 +381,7 @@
  *	Note: The caller is expected to handle the ack, clear, mask and
  *	unmask issues if necessary.
  */
-void
-handle_simple_irq(unsigned int irq, struct irq_desc *desc)
+void handle_simple_irq(struct irq_desc *desc)
 {
 	raw_spin_lock(&desc->lock);
 
@@ -425,7 +423,6 @@
 
 /**
  *	handle_level_irq - Level type irq handler
- *	@irq:	the interrupt number
  *	@desc:	the interrupt description structure for this irq
  *
  *	Level type interrupts are active as long as the hardware line has
@@ -433,8 +430,7 @@
  *	it after the associated handler has acknowledged the device, so the
  *	interrupt line is back to inactive.
  */
-void
-handle_level_irq(unsigned int irq, struct irq_desc *desc)
+void handle_level_irq(struct irq_desc *desc)
 {
 	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc);
@@ -496,7 +492,6 @@
 
 /**
  *	handle_fasteoi_irq - irq handler for transparent controllers
- *	@irq:	the interrupt number
  *	@desc:	the interrupt description structure for this irq
  *
  *	Only a single callback will be issued to the chip: an ->eoi()
@@ -504,8 +499,7 @@
  *	for modern forms of interrupt handlers, which handle the flow
  *	details in hardware, transparently.
  */
-void
-handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
+void handle_fasteoi_irq(struct irq_desc *desc)
 {
 	struct irq_chip *chip = desc->irq_data.chip;
 
@@ -546,7 +540,6 @@
 
 /**
  *	handle_edge_irq - edge type IRQ handler
- *	@irq:	the interrupt number
  *	@desc:	the interrupt description structure for this irq
  *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
@@ -560,8 +553,7 @@
  *	the handler was running. If all pending interrupts are handled, the
  *	loop is left.
  */
-void
-handle_edge_irq(unsigned int irq, struct irq_desc *desc)
+void handle_edge_irq(struct irq_desc *desc)
 {
 	raw_spin_lock(&desc->lock);
 
@@ -618,13 +610,12 @@
 #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
 /**
  *	handle_edge_eoi_irq - edge eoi type IRQ handler
- *	@irq:	the interrupt number
  *	@desc:	the interrupt description structure for this irq
  *
  * Similar as the above handle_edge_irq, but using eoi and w/o the
  * mask/unmask logic.
  */
-void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
+void handle_edge_eoi_irq(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
@@ -665,13 +656,11 @@
 
 /**
  *	handle_percpu_irq - Per CPU local irq handler
- *	@irq:	the interrupt number
  *	@desc:	the interrupt description structure for this irq
  *
  *	Per CPU interrupts on SMP machines without locking requirements
  */
-void
-handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
+void handle_percpu_irq(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
@@ -688,7 +677,6 @@
 
 /**
  * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
- * @irq:	the interrupt number
  * @desc:	the interrupt description structure for this irq
  *
  * Per CPU interrupts on SMP machines without locking requirements. Same as
@@ -698,11 +686,12 @@
  * contain the real device id for the cpu on which this handler is
  * called
  */
-void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
+void handle_percpu_devid_irq(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct irqaction *action = desc->action;
 	void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
+	unsigned int irq = irq_desc_get_irq(desc);
 	irqreturn_t res;
 
 	kstat_incr_irqs_this_cpu(desc);
@@ -796,7 +785,7 @@
 		return;
 
 	__irq_do_set_handler(desc, handle, 1, NULL);
-	desc->irq_data.handler_data = data;
+	desc->irq_common_data.handler_data = data;
 
 	irq_put_desc_busunlock(desc, flags);
 }
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index b6eeea8..de41a68 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -27,8 +27,10 @@
  *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
  */
-void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
+void handle_bad_irq(struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
+
 	print_irq_desc(irq, desc);
 	kstat_incr_irqs_this_cpu(desc);
 	ack_bad_irq(irq);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index eee4b38..5ef0c2d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -194,7 +194,7 @@
 
 static inline int irq_desc_get_node(struct irq_desc *desc)
 {
-	return irq_data_get_node(&desc->irq_data);
+	return irq_common_data_get_node(&desc->irq_common_data);
 }
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 0a2a4b6..239e2ae 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -38,12 +38,13 @@
 #ifdef CONFIG_SMP
 static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
 {
-	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
+	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
+				     gfp, node))
 		return -ENOMEM;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
-		free_cpumask_var(desc->irq_data.affinity);
+		free_cpumask_var(desc->irq_common_data.affinity);
 		return -ENOMEM;
 	}
 #endif
@@ -52,11 +53,13 @@
 
 static void desc_smp_init(struct irq_desc *desc, int node)
 {
-	desc->irq_data.node = node;
-	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
+	cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_clear(desc->pending_mask);
 #endif
+#ifdef CONFIG_NUMA
+	desc->irq_common_data.node = node;
+#endif
 }
 
 #else
@@ -70,12 +73,13 @@
 {
 	int cpu;
 
+	desc->irq_common_data.handler_data = NULL;
+	desc->irq_common_data.msi_desc = NULL;
+
 	desc->irq_data.common = &desc->irq_common_data;
 	desc->irq_data.irq = irq;
 	desc->irq_data.chip = &no_irq_chip;
 	desc->irq_data.chip_data = NULL;
-	desc->irq_data.handler_data = NULL;
-	desc->irq_data.msi_desc = NULL;
 	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
 	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
 	desc->handle_irq = handle_bad_irq;
@@ -121,7 +125,7 @@
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	free_cpumask_var(desc->pending_mask);
 #endif
-	free_cpumask_var(desc->irq_data.affinity);
+	free_cpumask_var(desc->irq_common_data.affinity);
 }
 #else
 static inline void free_masks(struct irq_desc *desc) { }
@@ -343,7 +347,7 @@
 
 	if (!desc)
 		return -EINVAL;
-	generic_handle_irq_desc(irq, desc);
+	generic_handle_irq_desc(desc);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(generic_handle_irq);
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 79baaf8..dc9d27c 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -844,7 +844,6 @@
 		child->parent_data = irq_data;
 		irq_data->irq = child->irq;
 		irq_data->common = child->common;
-		irq_data->node = child->node;
 		irq_data->domain = domain;
 	}
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ad1b064..4c21386 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -192,7 +192,7 @@
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
 	case IRQ_SET_MASK_OK_DONE:
-		cpumask_copy(data->affinity, mask);
+		cpumask_copy(desc->irq_common_data.affinity, mask);
 	case IRQ_SET_MASK_OK_NOCOPY:
 		irq_set_thread_affinity(desc);
 		ret = 0;
@@ -304,7 +304,7 @@
 	if (irq_move_pending(&desc->irq_data))
 		irq_get_pending(cpumask, desc);
 	else
-		cpumask_copy(cpumask, desc->irq_data.affinity);
+		cpumask_copy(cpumask, desc->irq_common_data.affinity);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	notify->notify(notify, cpumask);
@@ -375,9 +375,9 @@
 	 * one of the targets is online.
 	 */
 	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
-		if (cpumask_intersects(desc->irq_data.affinity,
+		if (cpumask_intersects(desc->irq_common_data.affinity,
 				       cpu_online_mask))
-			set = desc->irq_data.affinity;
+			set = desc->irq_common_data.affinity;
 		else
 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 	}
@@ -829,8 +829,8 @@
 	 * This code is triggered unconditionally. Check the affinity
 	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
 	 */
-	if (desc->irq_data.affinity)
-		cpumask_copy(mask, desc->irq_data.affinity);
+	if (desc->irq_common_data.affinity)
+		cpumask_copy(mask, desc->irq_common_data.affinity);
 	else
 		valid = false;
 	raw_spin_unlock_irq(&desc->lock);
@@ -1761,6 +1761,7 @@
 	kfree(__free_percpu_irq(irq, dev_id));
 	chip_bus_sync_unlock(desc);
 }
+EXPORT_SYMBOL_GPL(free_percpu_irq);
 
 /**
  *	setup_percpu_irq - setup a per-cpu interrupt
@@ -1790,9 +1791,10 @@
  *	@devname: An ascii name for the claiming device
  *	@dev_id: A percpu cookie passed back to the handler function
  *
- *	This call allocates interrupt resources, but doesn't
- *	automatically enable the interrupt. It has to be done on each
- *	CPU using enable_percpu_irq().
+ *	This call allocates interrupt resources and enables the
+ *	interrupt on the local CPU. If the interrupt is supposed to be
+ *	enabled on other CPUs, it has to be done on each CPU using
+ *	enable_percpu_irq().
  *
  *	Dev_id must be globally unique. It is a per-cpu variable, and
  *	the handler gets called with the interrupted CPU's instance of
@@ -1831,6 +1833,7 @@
 
 	return retval;
 }
+EXPORT_SYMBOL_GPL(request_percpu_irq);
 
 /**
  *	irq_get_irqchip_state - returns the irqchip state of a interrupt.
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 0e97c14..e3a8c95 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -39,7 +39,7 @@
 static int show_irq_affinity(int type, struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	const struct cpumask *mask = desc->irq_data.affinity;
+	const struct cpumask *mask = desc->irq_common_data.affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (irqd_is_setaffinity_pending(&desc->irq_data))
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index dd95f44..b86886b 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -38,7 +38,7 @@
 		clear_bit(irq, irqs_resend);
 		desc = irq_to_desc(irq);
 		local_irq_disable();
-		desc->handle_irq(irq, desc);
+		desc->handle_irq(desc);
 		local_irq_enable();
 	}
 }
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8acfbf7..4e49cc4 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3068,7 +3068,7 @@
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			  int trylock, int read, int check, int hardirqs_off,
 			  struct lockdep_map *nest_lock, unsigned long ip,
-			  int references)
+			  int references, int pin_count)
 {
 	struct task_struct *curr = current;
 	struct lock_class *class = NULL;
@@ -3157,7 +3157,7 @@
 	hlock->waittime_stamp = 0;
 	hlock->holdtime_stamp = lockstat_clock();
 #endif
-	hlock->pin_count = 0;
+	hlock->pin_count = pin_count;
 
 	if (check && !mark_irqflags(curr, hlock))
 		return 0;
@@ -3343,7 +3343,7 @@
 			hlock_class(hlock)->subclass, hlock->trylock,
 				hlock->read, hlock->check, hlock->hardirqs_off,
 				hlock->nest_lock, hlock->acquire_ip,
-				hlock->references))
+				hlock->references, hlock->pin_count))
 			return 0;
 	}
 
@@ -3433,7 +3433,7 @@
 			hlock_class(hlock)->subclass, hlock->trylock,
 				hlock->read, hlock->check, hlock->hardirqs_off,
 				hlock->nest_lock, hlock->acquire_ip,
-				hlock->references))
+				hlock->references, hlock->pin_count))
 			return 0;
 	}
 
@@ -3583,7 +3583,7 @@
 	current->lockdep_recursion = 1;
 	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 	__lock_acquire(lock, subclass, trylock, read, check,
-		       irqs_disabled_flags(flags), nest_lock, ip, 0);
+		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 337c881..87e9ce6a 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -289,7 +289,7 @@
 	if (pv_enabled())
 		goto queue;
 
-	if (virt_queued_spin_lock(lock))
+	if (virt_spin_lock(lock))
 		return;
 
 	/*
diff --git a/kernel/membarrier.c b/kernel/membarrier.c
new file mode 100644
index 0000000..536c727
--- /dev/null
+++ b/kernel/membarrier.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010, 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * membarrier system call
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/syscalls.h>
+#include <linux/membarrier.h>
+
+/*
+ * Bitmask made from an "or" of all commands within enum membarrier_cmd,
+ * except MEMBARRIER_CMD_QUERY.
+ */
+#define MEMBARRIER_CMD_BITMASK	(MEMBARRIER_CMD_SHARED)
+
+/**
+ * sys_membarrier - issue memory barriers on a set of threads
+ * @cmd:   Takes command values defined in enum membarrier_cmd.
+ * @flags: Currently needs to be 0. For future extensions.
+ *
+ * If this system call is not implemented, -ENOSYS is returned. If the
+ * command specified does not exist, or if the command argument is invalid,
+ * this system call returns -EINVAL. For a given command, with flags argument
+ * set to 0, this system call is guaranteed to always return the same value
+ * until reboot.
+ *
+ * All memory accesses performed in program order from each targeted thread
+ * are guaranteed to be ordered with respect to sys_membarrier(). If we use
+ * the semantic "barrier()" to represent a compiler barrier forcing memory
+ * accesses to be performed in program order across the barrier, and
+ * smp_mb() to represent explicit memory barriers forcing full memory
+ * ordering across the barrier, we have the following ordering table for
+ * each pair of barrier(), sys_membarrier() and smp_mb():
+ *
+ * The pair ordering is detailed as (O: ordered, X: not ordered):
+ *
+ *                        barrier()   smp_mb() sys_membarrier()
+ *        barrier()          X           X            O
+ *        smp_mb()           X           O            O
+ *        sys_membarrier()   O           O            O
+ */
+SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
+{
+	if (unlikely(flags))
+		return -EINVAL;
+	switch (cmd) {
+	case MEMBARRIER_CMD_QUERY:
+		return MEMBARRIER_CMD_BITMASK;
+	case MEMBARRIER_CMD_SHARED:
+		if (num_online_cpus() > 1)
+			synchronize_sched();
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
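
The asymmetric pairing described in the init/Kconfig help can be sketched in
user space as follows; the two paths, the shared flag and the direct
syscall(2) invocation are illustrative assumptions only:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

static volatile int flag;	/* hypothetical shared state */

/* Hot path: runs constantly, so the heavyweight barrier is replaced by a
 * plain compiler barrier and costs essentially nothing. */
static void fast_path(void)
{
	flag = 1;
	__asm__ __volatile__("" ::: "memory");	/* barrier() */
	/* ... read other shared data ... */
}

/* Cold path: runs rarely and pays for the ordering on behalf of every
 * fast-path thread by forcing them all through a memory barrier. */
static void slow_path(void)
{
	/* ... write other shared data ... */
	syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0);
	if (flag) {
		/* ... */
	}
}

int main(void)
{
	fast_path();
	slow_path();	/* single-threaded skeleton of the two-sided pattern */
	return 0;
}
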
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9f75f25..775d36c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3868,6 +3868,7 @@
 static void __init
 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
+	static struct lock_class_key rcu_exp_sched_rdp_class;
 	unsigned long flags;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp = rcu_get_root(rsp);
@@ -3883,6 +3884,10 @@
 	mutex_init(&rdp->exp_funnel_mutex);
 	rcu_boot_init_nocb_percpu_data(rdp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	if (rsp == &rcu_sched_state)
+		lockdep_set_class_and_name(&rdp->exp_funnel_mutex,
+					   &rcu_exp_sched_rdp_class,
+					   "rcu_data_exp_sched");
 }
 
 /*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3595403..6159531 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -621,18 +621,21 @@
 	int i, cpu = smp_processor_id();
 	struct sched_domain *sd;
 
-	if (!idle_cpu(cpu))
+	if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
 		return cpu;
 
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
 		for_each_cpu(i, sched_domain_span(sd)) {
-			if (!idle_cpu(i)) {
+			if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) {
 				cpu = i;
 				goto unlock;
 			}
 		}
 	}
+
+	if (!is_housekeeping_cpu(cpu))
+		cpu = housekeeping_any_cpu();
 unlock:
 	rcu_read_unlock();
 	return cpu;
@@ -2666,13 +2669,20 @@
 
 /*
  * Check if only the current task is running on the cpu.
+ *
+ * Caution: this function does not check that the caller has disabled
+ * preemption, thus the result might have a time-of-check-to-time-of-use
+ * race.  The caller is responsible for using it correctly, for example:
+ *
+ * - from a non-preemptable section (of course)
+ *
+ * - from a thread that is bound to a single CPU
+ *
+ * - in a loop with very short iterations (e.g. a polling loop)
  */
 bool single_task_running(void)
 {
-	if (cpu_rq(smp_processor_id())->nr_running == 1)
-		return true;
-	else
-		return false;
+	return raw_rq()->nr_running == 1;
 }
 EXPORT_SYMBOL(single_task_running);
 
@@ -4924,7 +4934,15 @@
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
-	do_set_cpus_allowed(idle, cpumask_of(cpu));
+#ifdef CONFIG_SMP
+	/*
+	 * It's possible that init_idle() gets called multiple times on a task,
+	 * in that case do_set_cpus_allowed() will not do the right thing.
+	 *
+	 * And since this is boot we can forgo the serialization.
+	 */
+	set_cpus_allowed_common(idle, cpumask_of(cpu));
+#endif
 	/*
 	 * We're having a chicken and egg problem, even though we are
 	 * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -4941,7 +4959,7 @@
 
 	rq->curr = rq->idle = idle;
 	idle->on_rq = TASK_ON_RQ_QUEUED;
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	idle->on_cpu = 1;
 #endif
 	raw_spin_unlock(&rq->lock);
@@ -4956,7 +4974,7 @@
 	idle->sched_class = &idle_sched_class;
 	ftrace_graph_init_idle_task(idle, cpu);
 	vtime_init_idle(idle, cpu);
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
 }
@@ -5178,24 +5196,47 @@
 			break;
 
 		/*
-		 * Ensure rq->lock covers the entire task selection
-		 * until the migration.
+		 * pick_next_task assumes pinned rq->lock.
 		 */
 		lockdep_pin_lock(&rq->lock);
 		next = pick_next_task(rq, &fake_task);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
 
+		/*
+		 * Rules for changing task_struct::cpus_allowed are holding
+		 * both pi_lock and rq->lock, such that holding either
+		 * stabilizes the mask.
+		 *
+		 * Dropping rq->lock is not quite as disastrous as it usually is
+		 * because !cpu_active at this point, which means load-balance
+		 * will not interfere. Also, stop-machine.
+		 */
+		lockdep_unpin_lock(&rq->lock);
+		raw_spin_unlock(&rq->lock);
+		raw_spin_lock(&next->pi_lock);
+		raw_spin_lock(&rq->lock);
+
+		/*
+		 * Since we're inside stop-machine, _nothing_ should have
+		 * changed the task; WARN if weird stuff happened, because in
+		 * that case the above rq->lock drop is a fail too.
+		 */
+		if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
+			raw_spin_unlock(&next->pi_lock);
+			continue;
+		}
+
 		/* Find suitable destination for @next, with force if needed. */
 		dest_cpu = select_fallback_rq(dead_rq->cpu, next);
 
-		lockdep_unpin_lock(&rq->lock);
 		rq = __migrate_task(rq, next, dest_cpu);
 		if (rq != dead_rq) {
 			raw_spin_unlock(&rq->lock);
 			rq = dead_rq;
 			raw_spin_lock(&rq->lock);
 		}
+		raw_spin_unlock(&next->pi_lock);
 	}
 
 	rq->stop = stop;
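
As a usage illustration for the single_task_running() caution added above, a
hypothetical polling loop of the kind the comment allows; the device type and
the device_has_work() helper are invented for the example:

#include <linux/sched.h>

struct some_device;					/* hypothetical */
extern bool device_has_work(struct some_device *dev);	/* hypothetical */

/* Each iteration re-checks, so a stale answer from single_task_running()
 * costs at most one short spin before being re-evaluated. */
static void poll_until_work(struct some_device *dev)
{
	while (!device_has_work(dev)) {
		if (!single_task_running())
			break;	/* another task wants this CPU, stop spinning */
		cpu_relax();
	}
}
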
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 272d932..052e026 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -106,10 +106,9 @@
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked);
 
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr,
-			  void *key)
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
 {
-	__wake_up_common(q, mode, nr, 0, key);
+	__wake_up_common(q, mode, 1, 0, key);
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked_key);
 
@@ -284,7 +283,7 @@
 	if (!list_empty(&wait->task_list))
 		list_del_init(&wait->task_list);
 	else if (waitqueue_active(q))
-		__wake_up_locked_key(q, mode, 1, key);
+		__wake_up_locked_key(q, mode, key);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(abort_exclusive_wait);
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 5bd4779..06858a7 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -370,7 +370,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
-					seccomp_check_filter);
+					seccomp_check_filter, false);
 	if (ret < 0) {
 		kfree(sfilter);
 		return ERR_PTR(ret);
@@ -469,7 +469,7 @@
 static inline void seccomp_filter_free(struct seccomp_filter *filter)
 {
 	if (filter) {
-		bpf_prog_free(filter->prog);
+		bpf_prog_destroy(filter->prog);
 		kfree(filter);
 	}
 }
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 03c3875..a02decf 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -245,3 +245,6 @@
 
 /* execveat */
 cond_syscall(sys_execveat);
+
+/* membarrier */
+cond_syscall(sys_membarrier);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e69201d..96c856b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -64,6 +64,7 @@
 #include <linux/binfmts.h>
 #include <linux/sched/sysctl.h>
 #include <linux/kexec.h>
+#include <linux/bpf.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
@@ -1139,6 +1140,18 @@
 		.proc_handler	= timer_migration_handler,
 	},
 #endif
+#ifdef CONFIG_BPF_SYSCALL
+	{
+		.procname	= "unprivileged_bpf_disabled",
+		.data		= &sysctl_unprivileged_bpf_disabled,
+		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
+		.mode		= 0644,
+		/* only handle a transition from default "0" to "1" */
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
+		.extra2		= &one,
+	},
+#endif
 	{ }
 };
 
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 50eb107..a9b76a4 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -97,20 +97,6 @@
 static int __clockevents_switch_state(struct clock_event_device *dev,
 				      enum clock_event_state state)
 {
-	/* Transition with legacy set_mode() callback */
-	if (dev->set_mode) {
-		/* Legacy callback doesn't support new modes */
-		if (state > CLOCK_EVT_STATE_ONESHOT)
-			return -ENOSYS;
-		/*
-		 * 'clock_event_state' and 'clock_event_mode' have 1-to-1
-		 * mapping until *_ONESHOT, and so a simple cast will work.
-		 */
-		dev->set_mode((enum clock_event_mode)state, dev);
-		dev->mode = (enum clock_event_mode)state;
-		return 0;
-	}
-
 	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
 		return 0;
 
@@ -204,12 +190,8 @@
 {
 	int ret = 0;
 
-	if (dev->set_mode) {
-		dev->set_mode(CLOCK_EVT_MODE_RESUME, dev);
-		dev->mode = CLOCK_EVT_MODE_RESUME;
-	} else if (dev->tick_resume) {
+	if (dev->tick_resume)
 		ret = dev->tick_resume(dev);
-	}
 
 	return ret;
 }
@@ -460,26 +442,6 @@
 }
 EXPORT_SYMBOL_GPL(clockevents_unbind_device);
 
-/* Sanity check of state transition callbacks */
-static int clockevents_sanity_check(struct clock_event_device *dev)
-{
-	/* Legacy set_mode() callback */
-	if (dev->set_mode) {
-		/* We shouldn't be supporting new modes now */
-		WARN_ON(dev->set_state_periodic || dev->set_state_oneshot ||
-			dev->set_state_shutdown || dev->tick_resume ||
-			dev->set_state_oneshot_stopped);
-
-		BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
-		return 0;
-	}
-
-	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
-		return 0;
-
-	return 0;
-}
-
 /**
  * clockevents_register_device - register a clock event device
  * @dev:	device to register
@@ -488,8 +450,6 @@
 {
 	unsigned long flags;
 
-	BUG_ON(clockevents_sanity_check(dev));
-
 	/* Initialize state to DETACHED */
 	clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
 
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index d11c55b..4fcd99e 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -398,7 +398,6 @@
 		 * the set mode function!
 		 */
 		clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
-		dev->mode = CLOCK_EVT_MODE_UNUSED;
 		clockevents_exchange_device(dev, NULL);
 		dev->event_handler = clockevents_handle_noop;
 		td->evtdev = NULL;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3319e16..7c7ec451 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -290,16 +290,17 @@
 __setup("nohz_full=", tick_nohz_full_setup);
 
 static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
-						 unsigned long action,
-						 void *hcpu)
+				       unsigned long action,
+				       void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
 		/*
-		 * If we handle the timekeeping duty for full dynticks CPUs,
-		 * we can't safely shutdown that CPU.
+		 * The boot CPU handles housekeeping duty (unbound timers,
+		 * workqueues, timekeeping, ...) on behalf of full dynticks
+		 * CPUs. It must remain online when nohz full is enabled.
 		 */
 		if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
 			return NOTIFY_BAD;
@@ -370,6 +371,12 @@
 	cpu_notifier(tick_nohz_cpu_down_callback, 0);
 	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
 		cpumask_pr_args(tick_nohz_full_mask));
+
+	/*
+	 * We need at least one CPU to handle housekeeping work such
+	 * as timekeeping, unbound timers, workqueues, ...
+	 */
+	WARN_ON_ONCE(cpumask_empty(housekeeping_mask));
 }
 #endif
 
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f6ee2e6..3739ac6 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1614,7 +1614,7 @@
 	negative = (tick_error < 0);
 
 	/* Sort out the magnitude of the correction */
-	tick_error = abs(tick_error);
+	tick_error = abs64(tick_error);
 	for (adj = 0; tick_error > interval; adj++)
 		tick_error >>= 1;
 
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 129c960..f75e35b 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -225,7 +225,7 @@
 		   (unsigned long long) dev->min_delta_ns);
 	SEQ_printf(m, " mult:           %u\n", dev->mult);
 	SEQ_printf(m, " shift:          %u\n", dev->shift);
-	SEQ_printf(m, " mode:           %d\n", dev->mode);
+	SEQ_printf(m, " mode:           %d\n", clockevent_get_state(dev));
 	SEQ_printf(m, " next_event:     %Ld nsecs\n",
 		   (unsigned long long) ktime_to_ns(dev->next_event));
 
@@ -233,40 +233,34 @@
 	print_name_offset(m, dev->set_next_event);
 	SEQ_printf(m, "\n");
 
-	if (dev->set_mode) {
-		SEQ_printf(m, " set_mode:       ");
-		print_name_offset(m, dev->set_mode);
+	if (dev->set_state_shutdown) {
+		SEQ_printf(m, " shutdown: ");
+		print_name_offset(m, dev->set_state_shutdown);
 		SEQ_printf(m, "\n");
-	} else {
-		if (dev->set_state_shutdown) {
-			SEQ_printf(m, " shutdown: ");
-			print_name_offset(m, dev->set_state_shutdown);
-			SEQ_printf(m, "\n");
-		}
+	}
 
-		if (dev->set_state_periodic) {
-			SEQ_printf(m, " periodic: ");
-			print_name_offset(m, dev->set_state_periodic);
-			SEQ_printf(m, "\n");
-		}
+	if (dev->set_state_periodic) {
+		SEQ_printf(m, " periodic: ");
+		print_name_offset(m, dev->set_state_periodic);
+		SEQ_printf(m, "\n");
+	}
 
-		if (dev->set_state_oneshot) {
-			SEQ_printf(m, " oneshot:  ");
-			print_name_offset(m, dev->set_state_oneshot);
-			SEQ_printf(m, "\n");
-		}
+	if (dev->set_state_oneshot) {
+		SEQ_printf(m, " oneshot:  ");
+		print_name_offset(m, dev->set_state_oneshot);
+		SEQ_printf(m, "\n");
+	}
 
-		if (dev->set_state_oneshot_stopped) {
-			SEQ_printf(m, " oneshot stopped: ");
-			print_name_offset(m, dev->set_state_oneshot_stopped);
-			SEQ_printf(m, "\n");
-		}
+	if (dev->set_state_oneshot_stopped) {
+		SEQ_printf(m, " oneshot stopped: ");
+		print_name_offset(m, dev->set_state_oneshot_stopped);
+		SEQ_printf(m, "\n");
+	}
 
-		if (dev->tick_resume) {
-			SEQ_printf(m, " resume:   ");
-			print_name_offset(m, dev->tick_resume);
-			SEQ_printf(m, "\n");
-		}
+	if (dev->tick_resume) {
+		SEQ_printf(m, " resume:   ");
+		print_name_offset(m, dev->tick_resume);
+		SEQ_printf(m, "\n");
 	}
 
 	SEQ_printf(m, " event_handler:  ");
diff --git a/lib/Makefile b/lib/Makefile
index 13a7c6a..8de3b01 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,8 @@
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
+	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
+	 once.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index ff19f66..b1c93e9 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -21,8 +21,7 @@
 
 static inline bool need_flush(struct iommu_map_table *iommu)
 {
-	return (iommu->lazy_flush != NULL &&
-		(iommu->flags & IOMMU_NEED_FLUSH) != 0);
+	return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
 }
 
 static inline void set_flush(struct iommu_map_table *iommu)
@@ -211,7 +210,8 @@
 			goto bail;
 		}
 	}
-	if (n < pool->hint || need_flush(iommu)) {
+	if (iommu->lazy_flush &&
+	    (n < pool->hint || need_flush(iommu))) {
 		clear_flush(iommu);
 		iommu->lazy_flush(iommu);
 	}
diff --git a/lib/once.c b/lib/once.c
new file mode 100644
index 0000000..05c8604
--- /dev/null
+++ b/lib/once.c
@@ -0,0 +1,62 @@
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/once.h>
+#include <linux/random.h>
+
+struct once_work {
+	struct work_struct work;
+	struct static_key *key;
+};
+
+static void once_deferred(struct work_struct *w)
+{
+	struct once_work *work;
+
+	work = container_of(w, struct once_work, work);
+	BUG_ON(!static_key_enabled(work->key));
+	static_key_slow_dec(work->key);
+	kfree(work);
+}
+
+static void once_disable_jump(struct static_key *key)
+{
+	struct once_work *w;
+
+	w = kmalloc(sizeof(*w), GFP_ATOMIC);
+	if (!w)
+		return;
+
+	INIT_WORK(&w->work, once_deferred);
+	w->key = key;
+	schedule_work(&w->work);
+}
+
+static DEFINE_SPINLOCK(once_lock);
+
+bool __do_once_start(bool *done, unsigned long *flags)
+	__acquires(once_lock)
+{
+	spin_lock_irqsave(&once_lock, *flags);
+	if (*done) {
+		spin_unlock_irqrestore(&once_lock, *flags);
+		/* Keep sparse happy by restoring an even lock count on
+		 * this lock. In case we return here, we don't call into
+		 * __do_once_done but return early in the DO_ONCE() macro.
+		 */
+		__acquire(once_lock);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(__do_once_start);
+
+void __do_once_done(bool *done, struct static_key *once_key,
+		    unsigned long *flags)
+	__releases(once_lock)
+{
+	*done = true;
+	spin_unlock_irqrestore(&once_lock, *flags);
+	once_disable_jump(once_key);
+}
+EXPORT_SYMBOL(__do_once_done);
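For reference, a minimal usage sketch of the new once infrastructure. It assumes
the DO_ONCE() wrapper from include/linux/once.h (not part of this hunk), which
pairs __do_once_start()/__do_once_done() with a static key so the slow path runs
exactly once; the helper names below are purely illustrative:

#include <linux/kernel.h>
#include <linux/once.h>
#include <linux/random.h>

/* Hypothetical example: seed a lookup table on first use only. After the
 * first call the static key patches the check out of the fast path.
 */
static void seed_table(u32 *tbl, size_t n)
{
	get_random_bytes(tbl, n * sizeof(*tbl));
}

static u32 table_lookup(u32 key)
{
	static u32 table[64];

	DO_ONCE(seed_table, table, ARRAY_SIZE(table));
	return table[key & (ARRAY_SIZE(table) - 1)];
}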
diff --git a/lib/random32.c b/lib/random32.c
index 0bee183..1211191 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -181,7 +181,7 @@
 	 * No locking on the CPUs, but then somewhat random results are, well,
 	 * expected.
 	 */
-	for_each_possible_cpu (i) {
+	for_each_possible_cpu(i) {
 		struct rnd_state *state = &per_cpu(net_rand_state, i);
 
 		state->s1 = __seed(state->s1 ^ entropy, 2U);
@@ -201,7 +201,7 @@
 	prandom_state_selftest();
 
 	for_each_possible_cpu(i) {
-		struct rnd_state *state = &per_cpu(net_rand_state,i);
+		struct rnd_state *state = &per_cpu(net_rand_state, i);
 		u32 weak_seed = (i + jiffies) ^ random_get_entropy();
 
 		prandom_seed_early(state, weak_seed, true);
@@ -238,13 +238,30 @@
 	add_timer(&seed_timer);
 }
 
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct rnd_state *state = per_cpu_ptr(pcpu_state, i);
+		u32 seeds[4];
+
+		get_random_bytes(&seeds, sizeof(seeds));
+		state->s1 = __seed(seeds[0],   2U);
+		state->s2 = __seed(seeds[1],   8U);
+		state->s3 = __seed(seeds[2],  16U);
+		state->s4 = __seed(seeds[3], 128U);
+
+		prandom_warmup(state);
+	}
+}
+
 /*
  *	Generate better values after random number generator
  *	is fully initialized.
  */
 static void __prandom_reseed(bool late)
 {
-	int i;
 	unsigned long flags;
 	static bool latch = false;
 	static DEFINE_SPINLOCK(lock);
@@ -266,19 +283,7 @@
 		goto out;
 
 	latch = true;
-
-	for_each_possible_cpu(i) {
-		struct rnd_state *state = &per_cpu(net_rand_state,i);
-		u32 seeds[4];
-
-		get_random_bytes(&seeds, sizeof(seeds));
-		state->s1 = __seed(seeds[0],   2U);
-		state->s2 = __seed(seeds[1],   8U);
-		state->s3 = __seed(seeds[2],  16U);
-		state->s4 = __seed(seeds[3], 128U);
-
-		prandom_warmup(state);
-	}
+	prandom_seed_full_state(&net_rand_state);
 out:
 	spin_unlock_irqrestore(&lock, flags);
 }
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index cc0c697..a54ff89 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -187,10 +187,7 @@
 	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
 				      new_tbl, new_hash);
 
-	if (rht_is_a_nulls(head))
-		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
-	else
-		RCU_INIT_POINTER(entry->next, head);
+	RCU_INIT_POINTER(entry->next, head);
 
 	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
 	spin_unlock(new_bucket_lock);
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 54036ce..5939f63 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -59,7 +59,11 @@
 	}
 
 	exp = divisor[units] / (u32)blk_size;
-	if (size >= exp) {
+	/*
+	 * size must be strictly greater than exp here to ensure that remainder
+	 * is greater than divisor[units] coming out of the if below.
+	 */
+	if (size > exp) {
 		remainder = do_div(size, divisor[units]);
 		remainder *= blk_size;
 		i++;
diff --git a/mm/Kconfig b/mm/Kconfig
index 6413d02..0d9fdcd 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -677,3 +677,6 @@
 	  mapping in an O_DIRECT operation, among other things.
 
 	  If FS_DAX is enabled, then say Y.
+
+config FRAME_VECTOR
+	bool
diff --git a/mm/Makefile b/mm/Makefile
index 56f8eed..2ed4319 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -80,3 +80,4 @@
 obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o
 obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
 obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
+obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 71a8998..312a716 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -394,7 +394,7 @@
 	list_for_each_entry(page, &pool->page_list, page_list) {
 		if (dma < page->dma)
 			continue;
-		if (dma < (page->dma + pool->allocation))
+		if ((dma - page->dma) < pool->allocation)
 			return page;
 	}
 	return NULL;
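The dmapool hunk above rewrites the range check so it stays correct when
page->dma lies near the top of the DMA address space and page->dma +
pool->allocation would wrap. A small userspace sketch of the difference (the
32-bit addresses are assumed purely for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base = 0xfffff000u;	/* page->dma near the top of the space */
	uint32_t size = 0x2000u;	/* pool->allocation */
	uint32_t dma  = 0xfffff800u;	/* address that really is inside the page */

	/* base + size wraps to 0x1000, so the old form wrongly rejects dma... */
	printf("old check: %d\n", dma < base + size);
	/* ...while the subtraction form is safe, since dma >= base is already
	 * guaranteed by the preceding "if (dma < page->dma) continue;".
	 */
	printf("new check: %d\n", (dma - base) < size);
	return 0;
}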
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
index 23f744d..17ae14b 100644
--- a/mm/early_ioremap.c
+++ b/mm/early_ioremap.c
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <asm/fixmap.h>
+#include <asm/early_ioremap.h>
 
 #ifdef CONFIG_MMU
 static int early_ioremap_debug __initdata;
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
new file mode 100644
index 0000000..cdabcb9
--- /dev/null
+++ b/mm/frame_vector.c
@@ -0,0 +1,230 @@
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+
+/**
+ * get_vaddr_frames() - map virtual addresses to pfns
+ * @start:	starting user address
+ * @nr_frames:	number of pages / pfns from start to map
+ * @write:	whether pages will be written to by the caller
+ * @force:	whether to force write access even if user mapping is
+ *		readonly. See description of the same argument of
+ *		get_user_pages().
+ * @vec:	structure which receives pages / pfns of the addresses mapped.
+ *		It should have space for at least nr_frames entries.
+ *
+ * This function maps virtual addresses from @start and fills @vec structure
+ * with page frame numbers or page pointers to corresponding pages (choice
+ * depends on the type of the vma underlying the virtual address). If @start
+ * belongs to a normal vma, the function grabs reference to each of the pages
+ * to pin them in memory. If @start belongs to VM_IO | VM_PFNMAP vma, we don't
+ * touch page structures and the caller must make sure pfns aren't reused for
+ * anything else while they are in use.
+ *
+ * The function returns the number of pages mapped, which may be less than
+ * @nr_frames. In particular, we stop mapping if there are vmas of a
+ * different type underlying the specified range of virtual addresses.
+ * When the function isn't able to map a single page, it returns an error.
+ *
+ * This function takes care of grabbing mmap_sem as necessary.
+ */
+int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
+		     bool write, bool force, struct frame_vector *vec)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int ret = 0;
+	int err;
+	int locked;
+
+	if (nr_frames == 0)
+		return 0;
+
+	if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
+		nr_frames = vec->nr_allocated;
+
+	down_read(&mm->mmap_sem);
+	locked = 1;
+	vma = find_vma_intersection(mm, start, start + 1);
+	if (!vma) {
+		ret = -EFAULT;
+		goto out;
+	}
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
+		vec->got_ref = true;
+		vec->is_pfns = false;
+		ret = get_user_pages_locked(current, mm, start, nr_frames,
+			write, force, (struct page **)(vec->ptrs), &locked);
+		goto out;
+	}
+
+	vec->got_ref = false;
+	vec->is_pfns = true;
+	do {
+		unsigned long *nums = frame_vector_pfns(vec);
+
+		while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
+			err = follow_pfn(vma, start, &nums[ret]);
+			if (err) {
+				if (ret == 0)
+					ret = err;
+				goto out;
+			}
+			start += PAGE_SIZE;
+			ret++;
+		}
+		/*
+		 * We stop if we have enough pages or if VMA doesn't completely
+		 * cover the tail page.
+		 */
+		if (ret >= nr_frames || start < vma->vm_end)
+			break;
+		vma = find_vma_intersection(mm, start, start + 1);
+	} while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));
+out:
+	if (locked)
+		up_read(&mm->mmap_sem);
+	if (!ret)
+		ret = -EFAULT;
+	if (ret > 0)
+		vec->nr_frames = ret;
+	return ret;
+}
+EXPORT_SYMBOL(get_vaddr_frames);
+
+/**
+ * put_vaddr_frames() - drop references to pages if get_vaddr_frames() acquired
+ *			them
+ * @vec:	frame vector to put
+ *
+ * Drop references to pages if get_vaddr_frames() acquired them. We also
+ * invalidate the frame vector so that it is prepared for the next call into
+ * get_vaddr_frames().
+ */
+void put_vaddr_frames(struct frame_vector *vec)
+{
+	int i;
+	struct page **pages;
+
+	if (!vec->got_ref)
+		goto out;
+	pages = frame_vector_pages(vec);
+	/*
+	 * frame_vector_pages() might have needed to do a conversion when
+	 * get_vaddr_frames() got pages but vec was later converted to pfns.
+	 * But it shouldn't really fail to convert pfns back...
+	 */
+	if (WARN_ON(IS_ERR(pages)))
+		goto out;
+	for (i = 0; i < vec->nr_frames; i++)
+		put_page(pages[i]);
+	vec->got_ref = false;
+out:
+	vec->nr_frames = 0;
+}
+EXPORT_SYMBOL(put_vaddr_frames);
+
+/**
+ * frame_vector_to_pages - convert frame vector to contain page pointers
+ * @vec:	frame vector to convert
+ *
+ * Convert @vec to contain array of page pointers.  If the conversion is
+ * successful, return 0. Otherwise return an error. Note that we do not grab
+ * page references for the page structures.
+ */
+int frame_vector_to_pages(struct frame_vector *vec)
+{
+	int i;
+	unsigned long *nums;
+	struct page **pages;
+
+	if (!vec->is_pfns)
+		return 0;
+	nums = frame_vector_pfns(vec);
+	for (i = 0; i < vec->nr_frames; i++)
+		if (!pfn_valid(nums[i]))
+			return -EINVAL;
+	pages = (struct page **)nums;
+	for (i = 0; i < vec->nr_frames; i++)
+		pages[i] = pfn_to_page(nums[i]);
+	vec->is_pfns = false;
+	return 0;
+}
+EXPORT_SYMBOL(frame_vector_to_pages);
+
+/**
+ * frame_vector_to_pfns - convert frame vector to contain pfns
+ * @vec:	frame vector to convert
+ *
+ * Convert @vec to contain array of pfns.
+ */
+void frame_vector_to_pfns(struct frame_vector *vec)
+{
+	int i;
+	unsigned long *nums;
+	struct page **pages;
+
+	if (vec->is_pfns)
+		return;
+	pages = (struct page **)(vec->ptrs);
+	nums = (unsigned long *)pages;
+	for (i = 0; i < vec->nr_frames; i++)
+		nums[i] = page_to_pfn(pages[i]);
+	vec->is_pfns = true;
+}
+EXPORT_SYMBOL(frame_vector_to_pfns);
+
+/**
+ * frame_vector_create() - allocate & initialize structure for pinned pfns
+ * @nr_frames:	number of pfns slots we should reserve
+ *
+ * Allocate and initialize a struct frame_vector able to hold @nr_frames
+ * frames.
+ */
+struct frame_vector *frame_vector_create(unsigned int nr_frames)
+{
+	struct frame_vector *vec;
+	int size = sizeof(struct frame_vector) + sizeof(void *) * nr_frames;
+
+	if (WARN_ON_ONCE(nr_frames == 0))
+		return NULL;
+	/*
+	 * This is absurdly high. It's here just to avoid strange effects when
+	 * arithmetic overflows.
+	 */
+	if (WARN_ON_ONCE(nr_frames > INT_MAX / sizeof(void *) / 2))
+		return NULL;
+	/*
+	 * Avoid higher order allocations, use vmalloc instead. It should
+	 * be rare anyway.
+	 */
+	if (size <= PAGE_SIZE)
+		vec = kmalloc(size, GFP_KERNEL);
+	else
+		vec = vmalloc(size);
+	if (!vec)
+		return NULL;
+	vec->nr_allocated = nr_frames;
+	vec->nr_frames = 0;
+	return vec;
+}
+EXPORT_SYMBOL(frame_vector_create);
+
+/**
+ * frame_vector_destroy() - free memory allocated to carry frame vector
+ * @vec:	Frame vector to free
+ *
+ * Free structure allocated by frame_vector_create() to carry frames.
+ */
+void frame_vector_destroy(struct frame_vector *vec)
+{
+	/* Make sure put_vaddr_frames() got called properly... */
+	VM_BUG_ON(vec->nr_frames > 0);
+	kvfree(vec);
+}
+EXPORT_SYMBOL(frame_vector_destroy);
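A sketch of the calling sequence the new frame_vector API is meant for, using
only the functions added above (assuming their declarations land in
<linux/mm.h>; the wrapper name and error handling are illustrative only):

#include <linux/mm.h>
#include <linux/err.h>

/* Hypothetical helper: pin nr user pages starting at 'start' as struct page
 * pointers, do some work, and release them again.
 */
static int pin_user_range(unsigned long start, unsigned int nr)
{
	struct frame_vector *vec;
	int ret;

	vec = frame_vector_create(nr);
	if (!vec)
		return -ENOMEM;

	ret = get_vaddr_frames(start, nr, true, false, vec);
	if (ret < 0)
		goto out_destroy;

	/* For VM_IO/VM_PFNMAP ranges the vector holds pfns; convert them. */
	ret = frame_vector_to_pages(vec);
	if (ret < 0)
		goto out_put;

	/* ... use frame_vector_pages(vec) here ... */
	ret = 0;

out_put:
	put_vaddr_frames(vec);
out_destroy:
	frame_vector_destroy(vec);
	return ret;
}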
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 999fb0a..9cc7734 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3202,6 +3202,14 @@
 			continue;
 
 		/*
+		 * Shared VMAs have their own reserves and do not affect
+		 * MAP_PRIVATE accounting, but it is possible that a shared
+		 * VMA is using the same page, so check and skip such VMAs.
+		 */
+		if (iter_vma->vm_flags & VM_MAYSHARE)
+			continue;
+
+		/*
 		 * Unmap the page from other VMAs without their own reserves.
 		 * They get marked to be SIGKILLed if they fault in these
 		 * areas. This is because a future no-page fault on this VMA
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 7b28e9cd..8da2114 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -135,12 +135,11 @@
 
 	if (unlikely(*shadow_addr)) {
 		u16 shadow_first_bytes = *(u16 *)shadow_addr;
-		s8 last_byte = (addr + 15) & KASAN_SHADOW_MASK;
 
 		if (unlikely(shadow_first_bytes))
 			return true;
 
-		if (likely(!last_byte))
+		if (likely(IS_ALIGNED(addr, 8)))
 			return false;
 
 		return memory_is_poisoned_1(addr + 15);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6ddaeba..1fedbde 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -644,12 +644,14 @@
 }
 
 /*
+ * Return the page count for a single (non-recursive) @memcg.
+ *
  * Implementation Note: reading percpu statistics for memcg.
  *
  * Both of vmstat[] and percpu_counter has threshold and do periodic
  * synchronization to implement "quick" read. There are trade-off between
  * reading cost and precision of value. Then, we may have a chance to implement
- * a periodic synchronizion of counter in memcg's counter.
+ * a periodic synchronization of counter in memcg's counter.
  *
  * But this _read() function is used for user interface now. The user accounts
  * memory usage by memory cgroup and he _always_ requires exact value because
@@ -659,17 +661,24 @@
  *
  * If there are kernel internal actions which can make use of some not-exact
  * value, and reading all cpu value can be performance bottleneck in some
- * common workload, threashold and synchonization as vmstat[] should be
+ * common workload, threshold and synchronization as vmstat[] should be
  * implemented.
  */
-static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
-				 enum mem_cgroup_stat_index idx)
+static unsigned long
+mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
 {
 	long val = 0;
 	int cpu;
 
+	/* Per-cpu values can be negative, use a signed accumulator */
 	for_each_possible_cpu(cpu)
 		val += per_cpu(memcg->stat->count[idx], cpu);
+	/*
+	 * Summing races with updates, so val may be negative.  Avoid exposing
+	 * transient negative values.
+	 */
+	if (val < 0)
+		val = 0;
 	return val;
 }
 
@@ -1254,7 +1263,7 @@
 		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
 			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
 				continue;
-			pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
+			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
 				K(mem_cgroup_read_stat(iter, i)));
 		}
 
@@ -2819,14 +2828,11 @@
 			       enum mem_cgroup_stat_index idx)
 {
 	struct mem_cgroup *iter;
-	long val = 0;
+	unsigned long val = 0;
 
-	/* Per-cpu values can be negative, use a signed accumulator */
 	for_each_mem_cgroup_tree(iter, memcg)
 		val += mem_cgroup_read_stat(iter, idx);
 
-	if (val < 0) /* race ? */
-		val = 0;
 	return val;
 }
 
@@ -3169,7 +3175,7 @@
 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
 		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
 			continue;
-		seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
+		seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
 			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
 	}
 
@@ -3194,13 +3200,13 @@
 			   (u64)memsw * PAGE_SIZE);
 
 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-		long long val = 0;
+		unsigned long long val = 0;
 
 		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
 			continue;
 		for_each_mem_cgroup_tree(mi, memcg)
 			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
-		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
+		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
 	}
 
 	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
@@ -4179,7 +4185,6 @@
 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
 		goto out_free_stat;
 
-	spin_lock_init(&memcg->pcp_counter_lock);
 	return memcg;
 
 out_free_stat:
diff --git a/mm/migrate.c b/mm/migrate.c
index c3cb566..842ecd7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -740,6 +740,15 @@
 	if (PageSwapBacked(page))
 		SetPageSwapBacked(newpage);
 
+	/*
+	 * Indirectly called below, migrate_page_copy() copies PG_dirty and thus
+	 * needs newpage's memcg set to transfer memcg dirty page accounting.
+	 * So perform memcg migration in two steps:
+	 * 1. set newpage->mem_cgroup (here)
+	 * 2. clear page->mem_cgroup (below)
+	 */
+	set_page_memcg(newpage, page_memcg(page));
+
 	mapping = page_mapping(page);
 	if (!mapping)
 		rc = migrate_page(mapping, newpage, page, mode);
@@ -756,9 +765,10 @@
 		rc = fallback_migrate_page(mapping, newpage, page, mode);
 
 	if (rc != MIGRATEPAGE_SUCCESS) {
+		set_page_memcg(newpage, NULL);
 		newpage->mapping = NULL;
 	} else {
-		mem_cgroup_migrate(page, newpage, false);
+		set_page_memcg(page, NULL);
 		if (page_was_mapped)
 			remove_migration_ptes(page, newpage);
 		page->mapping = NULL;
@@ -1075,7 +1085,7 @@
 	if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
 		put_new_page(new_hpage, private);
 	else
-		put_page(new_hpage);
+		putback_active_hugepage(new_hpage);
 
 	if (result) {
 		if (rc)
diff --git a/mm/mmap.c b/mm/mmap.c
index 971dd2c..79bcc9f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -612,8 +612,6 @@
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct rb_node **rb_link, struct rb_node *rb_parent)
 {
-	WARN_ONCE(vma->vm_file && !vma->vm_ops, "missing vma->vm_ops");
-
 	/* Update tracking information for the gap following the new vma. */
 	if (vma->vm_next)
 		vma_gap_update(vma->vm_next);
@@ -1492,13 +1490,14 @@
 int vma_wants_writenotify(struct vm_area_struct *vma)
 {
 	vm_flags_t vm_flags = vma->vm_flags;
+	const struct vm_operations_struct *vm_ops = vma->vm_ops;
 
 	/* If it was private or non-writable, the write bit is already clear */
 	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
 		return 0;
 
 	/* The backer wishes to know when pages are first written to? */
-	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+	if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
 		return 1;
 
 	/* The open routine did something to the protections that pgprot_modify
@@ -1638,12 +1637,6 @@
 		 */
 		WARN_ON_ONCE(addr != vma->vm_start);
 
-		/* All file mapping must have ->vm_ops set */
-		if (!vma->vm_ops) {
-			static const struct vm_operations_struct dummy_ops = {};
-			vma->vm_ops = &dummy_ops;
-		}
-
 		addr = vma->vm_start;
 		vm_flags = vma->vm_flags;
 	} else if (vm_flags & VM_SHARED) {
diff --git a/mm/slab.c b/mm/slab.c
index c77ebe6..4fcc5dd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2190,9 +2190,16 @@
 			size += BYTES_PER_WORD;
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-	if (size >= kmalloc_size(INDEX_NODE + 1)
-	    && cachep->object_size > cache_line_size()
-	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
+	/*
+	 * To activate debug pagealloc, off-slab management is a necessary
+	 * requirement. In the early phase of initialization, small sized slabs
+	 * don't get initialized, so it would not be possible there. Hence the
+	 * size >= 256 check: it guarantees that all necessary small sized
+	 * slabs are initialized in the current slab initialization sequence.
+	 */
+	if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+		size >= 256 && cachep->object_size > cache_line_size() &&
+		ALIGN(size, cachep->align) < PAGE_SIZE) {
 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
 		size = PAGE_SIZE;
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2d978b2..7f63a93 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -175,7 +175,7 @@
 	if (!memcg)
 		return true;
 #ifdef CONFIG_CGROUP_WRITEBACK
-	if (memcg->css.cgroup)
+	if (cgroup_on_dfl(memcg->css.cgroup))
 		return true;
 #endif
 	return false;
diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c
index ae1896f..83b19e0 100644
--- a/net/6lowpan/core.c
+++ b/net/6lowpan/core.c
@@ -17,6 +17,11 @@
 
 void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype)
 {
+	dev->addr_len = EUI64_ADDR_LEN;
+	dev->type = ARPHRD_6LOWPAN;
+	dev->mtu = IPV6_MIN_MTU;
+	dev->priv_flags |= IFF_NO_QUEUE;
+
 	lowpan_priv(dev)->lltype = lltype;
 }
 EXPORT_SYMBOL(lowpan_netdev_setup);
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 1e0071f..78c8a49 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -366,7 +366,18 @@
 			return err;
 	}
 
-	hdr.payload_len = htons(skb->len);
+	switch (lowpan_priv(dev)->lltype) {
+	case LOWPAN_LLTYPE_IEEE802154:
+		if (lowpan_802154_cb(skb)->d_size)
+			hdr.payload_len = htons(lowpan_802154_cb(skb)->d_size -
+						sizeof(struct ipv6hdr));
+		else
+			hdr.payload_len = htons(skb->len);
+		break;
+	default:
+		hdr.payload_len = htons(skb->len);
+		break;
+	}
 
 	pr_debug("skb headroom size = %d, data length = %d\n",
 		 skb_headroom(skb), skb->len);
diff --git a/net/6lowpan/nhc.h b/net/6lowpan/nhc.h
index ed44938..c249f17 100644
--- a/net/6lowpan/nhc.h
+++ b/net/6lowpan/nhc.h
@@ -8,8 +8,6 @@
 #include <net/6lowpan.h>
 #include <net/ipv6.h>
 
-#define LOWPAN_NHC_MAX_ID_LEN	1
-
 /**
  * LOWPAN_NHC - helper macro to generate nh id fields and lowpan_nhc struct
  *
diff --git a/net/6lowpan/nhc_udp.c b/net/6lowpan/nhc_udp.c
index c6bcaeb..72d0b57 100644
--- a/net/6lowpan/nhc_udp.c
+++ b/net/6lowpan/nhc_udp.c
@@ -71,7 +71,18 @@
 	 * here, we obtain the hint from the remaining size of the
 	 * frame
 	 */
-	uh.len = htons(skb->len + sizeof(struct udphdr));
+	switch (lowpan_priv(skb->dev)->lltype) {
+	case LOWPAN_LLTYPE_IEEE802154:
+		if (lowpan_802154_cb(skb)->d_size)
+			uh.len = htons(lowpan_802154_cb(skb)->d_size -
+				       sizeof(struct ipv6hdr));
+		else
+			uh.len = htons(skb->len + sizeof(struct udphdr));
+		break;
+	default:
+		uh.len = htons(skb->len + sizeof(struct udphdr));
+		break;
+	}
 	pr_debug("uncompressed UDP length: src = %d", ntohs(uh.len));
 
 	/* replace the compressed UDP head by the uncompressed UDP
diff --git a/net/Kconfig b/net/Kconfig
index 7021c1b..127da94 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -232,6 +232,7 @@
 source "net/mpls/Kconfig"
 source "net/hsr/Kconfig"
 source "net/switchdev/Kconfig"
+source "net/l3mdev/Kconfig"
 
 config RPS
 	bool
diff --git a/net/Makefile b/net/Makefile
index 3995613..a5d0409 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -74,3 +74,6 @@
 ifneq ($(CONFIG_NET_SWITCHDEV),)
 obj-y				+= switchdev/
 endif
+ifneq ($(CONFIG_NET_L3_MASTER_DEV),)
+obj-y				+= l3mdev/
+endif
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 17e55df..e07f551 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -317,6 +317,9 @@
 
 static int clip_encap(struct atm_vcc *vcc, int mode)
 {
+	if (!CLIP_VCC(vcc))
+		return -EBADFD;
+
 	CLIP_VCC(vcc)->encap = mode;
 	return 0;
 }
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 131e79c..db73b8a 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -35,7 +35,6 @@
 static struct dentry *lowpan_control_debugfs;
 
 #define IFACE_NAME_TEMPLATE "bt%d"
-#define EUI64_ADDR_LEN 8
 
 struct skb_cb {
 	struct in6_addr addr;
@@ -674,13 +673,8 @@
 
 static void netdev_setup(struct net_device *dev)
 {
-	dev->addr_len		= EUI64_ADDR_LEN;
-	dev->type		= ARPHRD_6LOWPAN;
-
 	dev->hard_header_len	= 0;
 	dev->needed_tailroom	= 0;
-	dev->mtu		= IPV6_MIN_MTU;
-	dev->tx_queue_len	= 0;
 	dev->flags		= IFF_RUNNING | IFF_POINTOPOINT |
 				  IFF_MULTICAST;
 	dev->watchdog_timeo	= 0;
@@ -775,24 +769,7 @@
 
 	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
 	chan->mode = L2CAP_MODE_LE_FLOWCTL;
-	chan->omtu = 65535;
-	chan->imtu = chan->omtu;
-
-	return chan;
-}
-
-static struct l2cap_chan *chan_open(struct l2cap_chan *pchan)
-{
-	struct l2cap_chan *chan;
-
-	chan = chan_create();
-	if (!chan)
-		return NULL;
-
-	chan->remote_mps = chan->omtu;
-	chan->mps = chan->omtu;
-
-	chan->state = BT_CONNECTED;
+	chan->imtu = 1280;
 
 	return chan;
 }
@@ -919,7 +896,10 @@
 {
 	struct l2cap_chan *chan;
 
-	chan = chan_open(pchan);
+	chan = chan_create();
+	if (!chan)
+		return NULL;
+
 	chan->ops = pchan->ops;
 
 	BT_DBG("chan %p pchan %p", chan, pchan);
@@ -1065,34 +1045,23 @@
 		return BDADDR_LE_RANDOM;
 }
 
-static struct l2cap_chan *chan_get(void)
-{
-	struct l2cap_chan *pchan;
-
-	pchan = chan_create();
-	if (!pchan)
-		return NULL;
-
-	pchan->ops = &bt_6lowpan_chan_ops;
-
-	return pchan;
-}
-
 static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
 {
-	struct l2cap_chan *pchan;
+	struct l2cap_chan *chan;
 	int err;
 
-	pchan = chan_get();
-	if (!pchan)
+	chan = chan_create();
+	if (!chan)
 		return -EINVAL;
 
-	err = l2cap_chan_connect(pchan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
+	chan->ops = &bt_6lowpan_chan_ops;
+
+	err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
 				 addr, dst_type);
 
-	BT_DBG("chan %p err %d", pchan, err);
+	BT_DBG("chan %p err %d", chan, err);
 	if (err < 0)
-		l2cap_chan_put(pchan);
+		l2cap_chan_put(chan);
 
 	return err;
 }
@@ -1117,31 +1086,32 @@
 static struct l2cap_chan *bt_6lowpan_listen(void)
 {
 	bdaddr_t *addr = BDADDR_ANY;
-	struct l2cap_chan *pchan;
+	struct l2cap_chan *chan;
 	int err;
 
 	if (!enable_6lowpan)
 		return NULL;
 
-	pchan = chan_get();
-	if (!pchan)
+	chan = chan_create();
+	if (!chan)
 		return NULL;
 
-	pchan->state = BT_LISTEN;
-	pchan->src_type = BDADDR_LE_PUBLIC;
+	chan->ops = &bt_6lowpan_chan_ops;
+	chan->state = BT_LISTEN;
+	chan->src_type = BDADDR_LE_PUBLIC;
 
-	atomic_set(&pchan->nesting, L2CAP_NESTING_PARENT);
+	atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);
 
-	BT_DBG("chan %p src type %d", pchan, pchan->src_type);
+	BT_DBG("chan %p src type %d", chan, chan->src_type);
 
-	err = l2cap_add_psm(pchan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
+	err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
 	if (err) {
-		l2cap_chan_put(pchan);
+		l2cap_chan_put(chan);
 		BT_ERR("psm cannot be added err %d", err);
 		return NULL;
 	}
 
-	return pchan;
+	return chan;
 }
 
 static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index adcbc74..d2b3dd3 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -134,6 +134,66 @@
 	.llseek		= default_llseek,
 };
 
+static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct hci_dev *hdev = file->private_data;
+	char buf[3];
+
+	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
+	buf[1] = '\n';
+	buf[2] = '\0';
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
+				 size_t count, loff_t *ppos)
+{
+	struct hci_dev *hdev = file->private_data;
+	char buf[32];
+	size_t buf_size = min(count, (sizeof(buf)-1));
+	bool enable;
+	int err;
+
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+	if (strtobool(buf, &enable))
+		return -EINVAL;
+
+	hci_req_lock(hdev);
+	err = hdev->set_diag(hdev, enable);
+	hci_req_unlock(hdev);
+
+	if (err < 0)
+		return err;
+
+	if (enable)
+		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
+	else
+		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
+
+	return count;
+}
+
+static const struct file_operations vendor_diag_fops = {
+	.open		= simple_open,
+	.read		= vendor_diag_read,
+	.write		= vendor_diag_write,
+	.llseek		= default_llseek,
+};
+
+static void hci_debugfs_create_basic(struct hci_dev *hdev)
+{
+	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
+			    &dut_mode_fops);
+
+	if (hdev->set_diag)
+		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
+				    &vendor_diag_fops);
+}
+
 /* ---- HCI requests ---- */
 
 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
@@ -693,7 +753,8 @@
 
 	hci_setup_event_mask(req);
 
-	if (hdev->commands[6] & 0x20) {
+	if (hdev->commands[6] & 0x20 &&
+	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
 		struct hci_cp_read_stored_link_key cp;
 
 		bacpy(&cp.bdaddr, BDADDR_ANY);
@@ -849,13 +910,8 @@
 	if (err < 0)
 		return err;
 
-	/* The Device Under Test (DUT) mode is special and available for
-	 * all controller types. So just create it early on.
-	 */
-	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
-		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
-				    &dut_mode_fops);
-	}
+	if (hci_dev_test_flag(hdev, HCI_SETUP))
+		hci_debugfs_create_basic(hdev);
 
 	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
 	if (err < 0)
@@ -932,6 +988,9 @@
 	if (err < 0)
 		return err;
 
+	if (hci_dev_test_flag(hdev, HCI_SETUP))
+		hci_debugfs_create_basic(hdev);
+
 	return 0;
 }
 
@@ -1384,6 +1443,9 @@
 		goto done;
 	}
 
+	set_bit(HCI_RUNNING, &hdev->flags);
+	hci_notify(hdev, HCI_DEV_OPEN);
+
 	atomic_set(&hdev->cmd_cnt, 1);
 	set_bit(HCI_INIT, &hdev->flags);
 
@@ -1465,6 +1527,9 @@
 			hdev->sent_cmd = NULL;
 		}
 
+		clear_bit(HCI_RUNNING, &hdev->flags);
+		hci_notify(hdev, HCI_DEV_CLOSE);
+
 		hdev->close(hdev);
 		hdev->flags &= BIT(HCI_RAW);
 	}
@@ -1548,8 +1613,10 @@
 	BT_DBG("All LE pending actions cleared");
 }
 
-static int hci_dev_do_close(struct hci_dev *hdev)
+int hci_dev_do_close(struct hci_dev *hdev)
 {
+	bool auto_off;
+
 	BT_DBG("%s %p", hdev->name, hdev);
 
 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
@@ -1605,10 +1672,10 @@
 
 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 
-	if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
-		if (hdev->dev_type == HCI_BREDR)
-			mgmt_powered(hdev, 0);
-	}
+	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
+
+	if (!auto_off && hdev->dev_type == HCI_BREDR)
+		mgmt_powered(hdev, 0);
 
 	hci_inquiry_cache_flush(hdev);
 	hci_pend_le_actions_clear(hdev);
@@ -1625,9 +1692,8 @@
 	/* Reset device */
 	skb_queue_purge(&hdev->cmd_q);
 	atomic_set(&hdev->cmd_cnt, 1);
-	if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
-	    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
-	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
+	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
+	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
 		set_bit(HCI_INIT, &hdev->flags);
 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
 		clear_bit(HCI_INIT, &hdev->flags);
@@ -1648,6 +1714,9 @@
 		hdev->sent_cmd = NULL;
 	}
 
+	clear_bit(HCI_RUNNING, &hdev->flags);
+	hci_notify(hdev, HCI_DEV_CLOSE);
+
 	/* After this point our queues are empty
 	 * and no tasks are scheduled. */
 	hdev->close(hdev);
@@ -3470,6 +3539,13 @@
 		return -ENXIO;
 	}
 
+	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
+	    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+	    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
 	/* Incoming skb */
 	bt_cb(skb)->incoming = 1;
 
@@ -3483,6 +3559,21 @@
 }
 EXPORT_SYMBOL(hci_recv_frame);
 
+/* Receive diagnostic message from HCI drivers */
+int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	/* Time stamp */
+	__net_timestamp(skb);
+
+	/* Mark as diagnostic packet and send to monitor */
+	bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
+	hci_send_to_monitor(hdev, skb);
+
+	kfree_skb(skb);
+	return 0;
+}
+EXPORT_SYMBOL(hci_recv_diag);
+
 /* ---- Interface to upper protocols ---- */
 
 int hci_register_cb(struct hci_cb *cb)
@@ -3529,6 +3620,11 @@
 	/* Get rid of skb owner, prior to sending to the driver. */
 	skb_orphan(skb);
 
+	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
+		kfree_skb(skb);
+		return;
+	}
+
 	err = hdev->send(hdev, skb);
 	if (err < 0) {
 		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
@@ -3579,6 +3675,25 @@
 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
 }
 
+/* Send HCI command and wait for command commplete event */
+struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+			     const void *param, u32 timeout)
+{
+	struct sk_buff *skb;
+
+	if (!test_bit(HCI_UP, &hdev->flags))
+		return ERR_PTR(-ENETDOWN);
+
+	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
+
+	hci_req_lock(hdev);
+	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
+	hci_req_unlock(hdev);
+
+	return skb;
+}
+EXPORT_SYMBOL(hci_cmd_sync);
+
 /* Send ACL data */
 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
 {
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 1860418..8acec93 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -4719,6 +4719,27 @@
 	struct hci_conn *conn;
 	bool match;
 	u32 flags;
+	u8 *ptr, real_len;
+
+	/* Find the end of the data in case the report contains padded zero
+	 * bytes at the end causing an invalid length value.
+	 *
+	 * When data is NULL, len is 0 so there is no need for extra ptr
+	 * check as 'ptr < data + 0' is already false in such case.
+	 * When data is NULL, len is 0, so there is no need for an extra ptr
+	 * check as 'ptr < data + 0' is already false in that case.
+		if (ptr + 1 + *ptr > data + len)
+			break;
+	}
+
+	real_len = ptr - data;
+
+	/* Adjust for actual length */
+	if (len != real_len) {
+		BT_ERR_RATELIMITED("%s advertising data length corrected",
+				   hdev->name);
+		len = real_len;
+	}
 
 	/* If the direct address is present, then this report is from
 	 * a LE Direct Advertising Report event. In that case it is
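The sanitizing walk added above relies on advertising data being a chain of
[length][type][payload] structures, so trailing zero padding shows up as a
bogus length. A standalone sketch of how real_len is derived (the sample data
is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* One 3-byte AD structure (len = 2, type = 0x01 "Flags", value 0x06),
	 * followed by zero padding that inflates the reported length.
	 */
	uint8_t data[] = { 0x02, 0x01, 0x06, 0x00, 0x00, 0x00 };
	uint8_t len = sizeof(data), real_len;
	uint8_t *ptr;

	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}
	real_len = ptr - data;

	printf("reported len = %u, real len = %u\n", len, real_len); /* 6 vs 3 */
	return 0;
}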
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index f2d30d1..9a100c1 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -279,6 +279,9 @@
 		else
 			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 		break;
+	case HCI_DIAG_PKT:
+		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
+		break;
 	default:
 		return;
 	}
@@ -303,6 +306,7 @@
 {
 	struct hci_mon_hdr *hdr;
 	struct hci_mon_new_index *ni;
+	struct hci_mon_index_info *ii;
 	struct sk_buff *skb;
 	__le16 opcode;
 
@@ -312,7 +316,7 @@
 		if (!skb)
 			return NULL;
 
-		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
+		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 		ni->type = hdev->dev_type;
 		ni->bus = hdev->bus;
 		bacpy(&ni->bdaddr, &hdev->bdaddr);
@@ -329,6 +333,34 @@
 		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 		break;
 
+	case HCI_DEV_UP:
+		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
+		if (!skb)
+			return NULL;
+
+		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
+		bacpy(&ii->bdaddr, &hdev->bdaddr);
+		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
+
+		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
+		break;
+
+	case HCI_DEV_OPEN:
+		skb = bt_skb_alloc(0, GFP_ATOMIC);
+		if (!skb)
+			return NULL;
+
+		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
+		break;
+
+	case HCI_DEV_CLOSE:
+		skb = bt_skb_alloc(0, GFP_ATOMIC);
+		if (!skb)
+			return NULL;
+
+		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
+		break;
+
 	default:
 		return NULL;
 	}
@@ -358,6 +390,26 @@
 
 		if (sock_queue_rcv_skb(sk, skb))
 			kfree_skb(skb);
+
+		if (!test_bit(HCI_RUNNING, &hdev->flags))
+			continue;
+
+		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
+		if (!skb)
+			continue;
+
+		if (sock_queue_rcv_skb(sk, skb))
+			kfree_skb(skb);
+
+		if (!test_bit(HCI_UP, &hdev->flags))
+			continue;
+
+		skb = create_monitor_event(hdev, HCI_DEV_UP);
+		if (!skb)
+			continue;
+
+		if (sock_queue_rcv_skb(sk, skb))
+			kfree_skb(skb);
 	}
 
 	read_unlock(&hci_dev_list_lock);
@@ -392,14 +444,12 @@
 
 void hci_sock_dev_event(struct hci_dev *hdev, int event)
 {
-	struct hci_ev_si_device ev;
-
 	BT_DBG("hdev %s event %d", hdev->name, event);
 
-	/* Send event to monitor */
 	if (atomic_read(&monitor_promisc)) {
 		struct sk_buff *skb;
 
+		/* Send event to monitor */
 		skb = create_monitor_event(hdev, event);
 		if (skb) {
 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
@@ -408,10 +458,14 @@
 		}
 	}
 
-	/* Send event to sockets */
-	ev.event  = event;
-	ev.dev_id = hdev->id;
-	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
+	if (event <= HCI_DEV_DOWN) {
+		struct hci_ev_si_device ev;
+
+		/* Send event to sockets */
+		ev.event  = event;
+		ev.dev_id = hdev->id;
+		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
+	}
 
 	if (event == HCI_DEV_UNREG) {
 		struct sock *sk;
@@ -503,7 +557,16 @@
 
 	if (hdev) {
 		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
-			hci_dev_close(hdev->id);
+			/* When releasing a user channel's exclusive access,
+			 * call hci_dev_do_close directly instead of calling
+			 * hci_dev_close to ensure the exclusive access will
+			 * be released and the controller brought back down.
+			 *
+			 * The checking of HCI_AUTO_OFF is not needed in this
+			 * case since it will have been cleared already when
+			 * opening the user channel.
+			 */
+			hci_dev_do_close(hdev);
 			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 			mgmt_index_added(hdev);
 		}
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index b36bc04..aa4cf64 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -151,6 +151,22 @@
 }
 EXPORT_SYMBOL(bt_info);
 
+void bt_warn(const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, format);
+
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	pr_warn("%pV", &vaf);
+
+	va_end(args);
+}
+EXPORT_SYMBOL(bt_warn);
+
 void bt_err(const char *format, ...)
 {
 	struct va_format vaf;
@@ -166,3 +182,19 @@
 	va_end(args);
 }
 EXPORT_SYMBOL(bt_err);
+
+void bt_err_ratelimited(const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, format);
+
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	pr_err_ratelimited("%pV", &vaf);
+
+	va_end(args);
+}
+EXPORT_SYMBOL(bt_err_ratelimited);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index ad82324..25644e1 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -495,7 +495,7 @@
 	}
 
 	/* The output of the random address function ah is:
-	 *	ah(h, r) = e(k, r') mod 2^24
+	 *	ah(k, r) = e(k, r') mod 2^24
 	 * The output of the security function e is then truncated to 24 bits
 	 * by taking the least significant 24 bits of the output of e as the
 	 * result of ah.
@@ -2311,12 +2311,6 @@
 	if (!conn)
 		return 1;
 
-	chan = conn->smp;
-	if (!chan) {
-		BT_ERR("SMP security requested but not available");
-		return 1;
-	}
-
 	if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
 		return 1;
 
@@ -2330,6 +2324,12 @@
 		if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
 			return 0;
 
+	chan = conn->smp;
+	if (!chan) {
+		BT_ERR("SMP security requested but not available");
+		return 1;
+	}
+
 	l2cap_chan_lock(chan);
 
 	/* If SMP is already in progress ignore this request */
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 6ed2feb..bdfb954 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -56,7 +56,7 @@
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
 
-	if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
+	if (!br_allowed_ingress(br, br_vlan_group(br), skb, &vid))
 		goto out;
 
 	if (is_broadcast_ether_addr(dest))
@@ -391,7 +391,7 @@
 	br->bridge_max_age = br->max_age = 20 * HZ;
 	br->bridge_hello_time = br->hello_time = 2 * HZ;
 	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
-	br->ageing_time = 300 * HZ;
+	br->ageing_time = BR_DEFAULT_AGEING_TIME;
 
 	br_netfilter_rtable_init(br);
 	br_stp_timer_init(br);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 9e9875d..f43ce05 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -133,15 +133,13 @@
 
 static void fdb_del_external_learn(struct net_bridge_fdb_entry *f)
 {
-	struct switchdev_obj obj = {
-		.id = SWITCHDEV_OBJ_PORT_FDB,
-		.u.fdb = {
-			.addr = f->addr.addr,
-			.vid = f->vlan_id,
-		},
+	struct switchdev_obj_port_fdb fdb = {
+		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
+		.addr = f->addr.addr,
+		.vid = f->vlan_id,
 	};
 
-	switchdev_port_obj_del(f->dst->dev, &obj);
+	switchdev_port_obj_del(f->dst->dev, &fdb.obj);
 }
 
 static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
@@ -163,22 +161,27 @@
 			     struct net_bridge_fdb_entry *f)
 {
 	const unsigned char *addr = f->addr.addr;
-	u16 vid = f->vlan_id;
+	struct net_bridge_vlan_group *vg;
+	const struct net_bridge_vlan *v;
 	struct net_bridge_port *op;
+	u16 vid = f->vlan_id;
 
 	/* Maybe another port has same hw addr? */
 	list_for_each_entry(op, &br->port_list, list) {
+		vg = nbp_vlan_group(op);
 		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
-		    (!vid || nbp_vlan_find(op, vid))) {
+		    (!vid || br_vlan_find(vg, vid))) {
 			f->dst = op;
 			f->added_by_user = 0;
 			return;
 		}
 	}
 
+	vg = br_vlan_group(br);
+	v = br_vlan_find(vg, vid);
 	/* Maybe bridge device has same hw addr? */
 	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
-	    (!vid || br_vlan_find(br, vid))) {
+	    (!vid || (v && br_vlan_should_use(v)))) {
 		f->dst = NULL;
 		f->added_by_user = 0;
 		return;
@@ -203,14 +206,14 @@
 
 void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
 {
+	struct net_bridge_vlan_group *vg;
 	struct net_bridge *br = p->br;
-	struct net_port_vlans *pv = nbp_get_vlan_info(p);
-	bool no_vlan = !pv;
+	struct net_bridge_vlan *v;
 	int i;
-	u16 vid;
 
 	spin_lock_bh(&br->hash_lock);
 
+	vg = nbp_vlan_group(p);
 	/* Search all chains since old address/hash is unknown */
 	for (i = 0; i < BR_HASH_SIZE; i++) {
 		struct hlist_node *h;
@@ -226,7 +229,7 @@
 				 * configured, we can safely be done at
 				 * this point.
 				 */
-				if (no_vlan)
+				if (!vg || !vg->num_vlans)
 					goto insert;
 			}
 		}
@@ -236,15 +239,15 @@
 	/* insert new address,  may fail if invalid address or dup. */
 	fdb_insert(br, p, newaddr, 0);
 
-	if (no_vlan)
+	if (!vg || !vg->num_vlans)
 		goto done;
 
 	/* Now add entries for every VLAN configured on the port.
 	 * This function runs under RTNL so the bitmap will not change
 	 * from under us.
 	 */
-	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
-		fdb_insert(br, p, newaddr, vid);
+	list_for_each_entry(v, &vg->vlan_list, vlist)
+		fdb_insert(br, p, newaddr, v->vid);
 
 done:
 	spin_unlock_bh(&br->hash_lock);
@@ -252,9 +255,9 @@
 
 void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 {
+	struct net_bridge_vlan_group *vg;
 	struct net_bridge_fdb_entry *f;
-	struct net_port_vlans *pv;
-	u16 vid = 0;
+	struct net_bridge_vlan *v;
 
 	spin_lock_bh(&br->hash_lock);
 
@@ -264,20 +267,18 @@
 		fdb_delete_local(br, NULL, f);
 
 	fdb_insert(br, NULL, newaddr, 0);
-
+	vg = br_vlan_group(br);
+	if (!vg || !vg->num_vlans)
+		goto out;
 	/* Now remove and add entries for every VLAN configured on the
 	 * bridge.  This function runs under RTNL so the bitmap will not
 	 * change from under us.
 	 */
-	pv = br_get_vlan_info(br);
-	if (!pv)
-		goto out;
-
-	for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
-		f = __br_fdb_get(br, br->dev->dev_addr, vid);
+	list_for_each_entry(v, &vg->vlan_list, vlist) {
+		f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
 		if (f && f->is_local && !f->dst)
 			fdb_delete_local(br, NULL, f);
-		fdb_insert(br, NULL, newaddr, vid);
+		fdb_insert(br, NULL, newaddr, v->vid);
 	}
 out:
 	spin_unlock_bh(&br->hash_lock);
@@ -299,6 +300,8 @@
 			unsigned long this_timer;
 			if (f->is_static)
 				continue;
+			if (f->added_by_external_learn)
+				continue;
 			this_timer = f->updated + delay;
 			if (time_before_eq(this_timer, jiffies))
 				fdb_delete(br, f);
@@ -605,13 +608,14 @@
 	}
 }
 
-static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
+static int fdb_to_nud(const struct net_bridge *br,
+		      const struct net_bridge_fdb_entry *fdb)
 {
 	if (fdb->is_local)
 		return NUD_PERMANENT;
 	else if (fdb->is_static)
 		return NUD_NOARP;
-	else if (has_expired(fdb->dst->br, fdb))
+	else if (has_expired(br, fdb))
 		return NUD_STALE;
 	else
 		return NUD_REACHABLE;
@@ -637,7 +641,7 @@
 	ndm->ndm_flags	 = fdb->added_by_external_learn ? NTF_EXT_LEARNED : 0;
 	ndm->ndm_type	 = 0;
 	ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
-	ndm->ndm_state   = fdb_to_nud(fdb);
+	ndm->ndm_state   = fdb_to_nud(br, fdb);
 
 	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
 		goto nla_put_failure;
@@ -782,7 +786,7 @@
 		}
 	}
 
-	if (fdb_to_nud(fdb) != state) {
+	if (fdb_to_nud(br, fdb) != state) {
 		if (state & NUD_PERMANENT) {
 			fdb->is_local = 1;
 			if (!fdb->is_static) {
@@ -842,9 +846,11 @@
 	       struct net_device *dev,
 	       const unsigned char *addr, u16 vid, u16 nlh_flags)
 {
-	struct net_bridge_port *p;
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p = NULL;
+	struct net_bridge_vlan *v;
+	struct net_bridge *br = NULL;
 	int err = 0;
-	struct net_port_vlans *pv;
 
 	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
 		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
@@ -856,34 +862,51 @@
 		return -EINVAL;
 	}
 
-	p = br_port_get_rtnl(dev);
-	if (p == NULL) {
-		pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
-			dev->name);
-		return -EINVAL;
+	if (dev->priv_flags & IFF_EBRIDGE) {
+		br = netdev_priv(dev);
+		vg = br_vlan_group(br);
+	} else {
+		p = br_port_get_rtnl(dev);
+		if (!p) {
+			pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
+				dev->name);
+			return -EINVAL;
+		}
+		vg = nbp_vlan_group(p);
 	}
 
-	pv = nbp_get_vlan_info(p);
 	if (vid) {
-		if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
-			pr_info("bridge: RTM_NEWNEIGH with unconfigured "
-				"vlan %d on port %s\n", vid, dev->name);
+		v = br_vlan_find(vg, vid);
+		if (!v || !br_vlan_should_use(v)) {
+			pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
 			return -EINVAL;
 		}
 
 		/* VID was specified, so use it. */
-		err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
+		if (dev->priv_flags & IFF_EBRIDGE)
+			err = br_fdb_insert(br, NULL, addr, vid);
+		else
+			err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
 	} else {
-		err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
-		if (err || !pv)
+		if (dev->priv_flags & IFF_EBRIDGE)
+			err = br_fdb_insert(br, NULL, addr, 0);
+		else
+			err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
+		if (err || !vg || !vg->num_vlans)
 			goto out;
 
 		/* We have vlans configured on this port and user didn't
 		 * specify a VLAN.  To be nice, add/update entry for every
 		 * vlan on this port.
 		 */
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-			err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
+		list_for_each_entry(v, &vg->vlan_list, vlist) {
+			if (!br_vlan_should_use(v))
+				continue;
+			if (dev->priv_flags & IFF_EBRIDGE)
+				err = br_fdb_insert(br, NULL, addr, v->vid);
+			else
+				err = __br_fdb_add(ndm, p, addr, nlh_flags,
+						   v->vid);
 			if (err)
 				goto out;
 		}
@@ -893,6 +916,32 @@
 	return err;
 }
 
+static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr,
+			      u16 vid)
+{
+	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
+	struct net_bridge_fdb_entry *fdb;
+
+	fdb = fdb_find(head, addr, vid);
+	if (!fdb)
+		return -ENOENT;
+
+	fdb_delete(br, fdb);
+	return 0;
+}
+
+static int __br_fdb_delete_by_addr(struct net_bridge *br,
+				   const unsigned char *addr, u16 vid)
+{
+	int err;
+
+	spin_lock_bh(&br->hash_lock);
+	err = fdb_delete_by_addr(br, addr, vid);
+	spin_unlock_bh(&br->hash_lock);
+
+	return err;
+}
+
 static int fdb_delete_by_addr_and_port(struct net_bridge_port *p,
 				       const u8 *addr, u16 vlan)
 {
@@ -925,38 +974,53 @@
 		  struct net_device *dev,
 		  const unsigned char *addr, u16 vid)
 {
-	struct net_bridge_port *p;
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p = NULL;
+	struct net_bridge_vlan *v;
+	struct net_bridge *br = NULL;
 	int err;
-	struct net_port_vlans *pv;
 
-	p = br_port_get_rtnl(dev);
-	if (p == NULL) {
-		pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
-			dev->name);
-		return -EINVAL;
+	if (dev->priv_flags & IFF_EBRIDGE) {
+		br = netdev_priv(dev);
+		vg = br_vlan_group(br);
+	} else {
+		p = br_port_get_rtnl(dev);
+		if (!p) {
+			pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
+				dev->name);
+			return -EINVAL;
+		}
+		vg = nbp_vlan_group(p);
 	}
 
-	pv = nbp_get_vlan_info(p);
 	if (vid) {
-		if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
-			pr_info("bridge: RTM_DELNEIGH with unconfigured "
-				"vlan %d on port %s\n", vid, dev->name);
+		v = br_vlan_find(vg, vid);
+		if (!v) {
+			pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
 			return -EINVAL;
 		}
 
-		err = __br_fdb_delete(p, addr, vid);
+		if (dev->priv_flags & IFF_EBRIDGE)
+			err = __br_fdb_delete_by_addr(br, addr, vid);
+		else
+			err = __br_fdb_delete(p, addr, vid);
 	} else {
 		err = -ENOENT;
-		err &= __br_fdb_delete(p, addr, 0);
-		if (!pv)
+		if (dev->priv_flags & IFF_EBRIDGE)
+			err = __br_fdb_delete_by_addr(br, addr, 0);
+		else
+			err &= __br_fdb_delete(p, addr, 0);
+
+		if (!vg || !vg->num_vlans)
 			goto out;
 
-		/* We have vlans configured on this port and user didn't
-		 * specify a VLAN.  To be nice, add/update entry for every
-		 * vlan on this port.
-		 */
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-			err &= __br_fdb_delete(p, addr, vid);
+		list_for_each_entry(v, &vg->vlan_list, vlist) {
+			if (!br_vlan_should_use(v))
+				continue;
+			if (dev->priv_flags & IFF_EBRIDGE)
+				err = __br_fdb_delete_by_addr(br, addr, v->vid);
+			else
+				err &= __br_fdb_delete(p, addr, v->vid);
 		}
 	}
 out:
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 48afca7..6d5ed79 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -30,9 +30,11 @@
 static inline int should_deliver(const struct net_bridge_port *p,
 				 const struct sk_buff *skb)
 {
+	struct net_bridge_vlan_group *vg;
+
+	vg = nbp_vlan_group(p);
 	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
-		br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
-		p->state == BR_STATE_FORWARDING;
+		br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING;
 }
 
 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -76,7 +78,10 @@
 
 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
+	struct net_bridge_vlan_group *vg;
+
+	vg = nbp_vlan_group(to);
+	skb = br_handle_vlan(to->br, vg, skb);
 	if (!skb)
 		return;
 
@@ -99,6 +104,7 @@
 
 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 {
+	struct net_bridge_vlan_group *vg;
 	struct net_device *indev;
 
 	if (skb_warn_if_lro(skb)) {
@@ -106,7 +112,8 @@
 		return;
 	}
 
-	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
+	vg = nbp_vlan_group(to);
+	skb = br_handle_vlan(to->br, vg, skb);
 	if (!skb)
 		return;
 
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 45e4757..934cae9 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -248,7 +248,6 @@
 
 	list_del_rcu(&p->list);
 
-	nbp_vlan_flush(p);
 	br_fdb_delete_by_port(br, p, 0, 1);
 	nbp_update_port_count(br);
 
@@ -257,6 +256,8 @@
 	dev->priv_flags &= ~IFF_BRIDGE_PORT;
 
 	netdev_rx_handler_unregister(dev);
+	/* use the synchronize_rcu done by netdev_rx_handler_unregister */
+	nbp_vlan_flush(p);
 
 	br_multicast_del_port(p);
 
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 223f404..f5c5a45 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -36,28 +36,28 @@
 {
 	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
 	struct net_bridge *br = netdev_priv(brdev);
+	struct net_bridge_vlan_group *vg;
 	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
-	struct net_port_vlans *pv;
 
 	u64_stats_update_begin(&brstats->syncp);
 	brstats->rx_packets++;
 	brstats->rx_bytes += skb->len;
 	u64_stats_update_end(&brstats->syncp);
 
+	vg = br_vlan_group(br);
 	/* Bridge is just like any other port.  Make sure the
 	 * packet is allowed except in promisc mode when someone
 	 * may be running packet capture.
 	 */
-	pv = br_get_vlan_info(br);
 	if (!(brdev->flags & IFF_PROMISC) &&
-	    !br_allowed_egress(br, pv, skb)) {
+	    !br_allowed_egress(vg, skb)) {
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
 
 	indev = skb->dev;
 	skb->dev = brdev;
-	skb = br_handle_vlan(br, pv, skb);
+	skb = br_handle_vlan(br, vg, skb);
 	if (!skb)
 		return NET_RX_DROP;
 
@@ -140,7 +140,7 @@
 	if (!p || p->state == BR_STATE_DISABLED)
 		goto drop;
 
-	if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid))
+	if (!br_allowed_ingress(p->br, nbp_vlan_group(p), skb, &vid))
 		goto out;
 
 	/* insert into forwarding database after filtering to avoid spoofing */
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 8d423bc..263b4de 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -200,8 +200,7 @@
 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
-		br->ageing_time = clock_t_to_jiffies(args[1]);
-		return 0;
+		return br_set_ageing_time(br, args[1]);
 
 	case BRCTL_GET_PORT_INFO:
 	{
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index d747275..cd8deea 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -464,11 +464,11 @@
 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
-	unsigned short vid = VLAN_N_VID;
+	struct net_bridge_vlan_group *vg;
 	struct net_device *dev, *pdev;
 	struct br_mdb_entry *entry;
 	struct net_bridge_port *p;
-	struct net_port_vlans *pv;
+	struct net_bridge_vlan *v;
 	struct net_bridge *br;
 	int err;
 
@@ -489,10 +489,10 @@
 	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
 		return -EINVAL;
 
-	pv = nbp_get_vlan_info(p);
-	if (br_vlan_enabled(br) && pv && entry->vid == 0) {
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-			entry->vid = vid;
+	vg = nbp_vlan_group(p);
+	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
+		list_for_each_entry(v, &vg->vlan_list, vlist) {
+			entry->vid = v->vid;
 			err = __br_mdb_add(net, br, entry);
 			if (err)
 				break;
@@ -566,11 +566,11 @@
 static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
-	unsigned short vid = VLAN_N_VID;
+	struct net_bridge_vlan_group *vg;
 	struct net_device *dev, *pdev;
 	struct br_mdb_entry *entry;
 	struct net_bridge_port *p;
-	struct net_port_vlans *pv;
+	struct net_bridge_vlan *v;
 	struct net_bridge *br;
 	int err;
 
@@ -591,10 +591,10 @@
 	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
 		return -EINVAL;
 
-	pv = nbp_get_vlan_info(p);
-	if (br_vlan_enabled(br) && pv && entry->vid == 0) {
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-			entry->vid = vid;
+	vg = nbp_vlan_group(p);
+	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
+		list_for_each_entry(v, &vg->vlan_list, vlist) {
+			entry->vid = v->vid;
 			err = __br_mdb_del(br, entry);
 			if (!err)
 				__br_mdb_notify(dev, entry, RTM_DELMDB);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index b4d858a..03661d9 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1006,7 +1006,7 @@
 
 	ih = igmpv3_report_hdr(skb);
 	num = ntohs(ih->ngrec);
-	len = sizeof(*ih);
+	len = skb_transport_offset(skb) + sizeof(*ih);
 
 	for (i = 0; i < num; i++) {
 		len += sizeof(*grec);
@@ -1067,7 +1067,7 @@
 
 	icmp6h = icmp6_hdr(skb);
 	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
-	len = sizeof(*icmp6h);
+	len = skb_transport_offset(skb) + sizeof(*icmp6h);
 
 	for (i = 0; i < num; i++) {
 		__be16 *nsrcs, _nsrcs;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index e6e76bb..370aa4d 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -189,10 +189,9 @@
  * expected format
  */
 
-static int br_validate_ipv4(struct sk_buff *skb)
+static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
 {
 	const struct iphdr *iph;
-	struct net_device *dev = skb->dev;
 	u32 len;
 
 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
@@ -213,13 +212,13 @@
 
 	len = ntohs(iph->tot_len);
 	if (skb->len < len) {
-		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
+		IP_INC_STATS_BH(net, IPSTATS_MIB_INTRUNCATEDPKTS);
 		goto drop;
 	} else if (len < (iph->ihl*4))
 		goto inhdr_error;
 
 	if (pskb_trim_rcsum(skb, len)) {
-		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
+		IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 
@@ -232,7 +231,7 @@
 	return 0;
 
 inhdr_error:
-	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
+	IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
 drop:
 	return -1;
 }
@@ -464,7 +463,7 @@
  * receiving device) to make netfilter happy, the REDIRECT
  * target in particular.  Save the original destination IP
  * address to be able to detect DNAT afterwards. */
-static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
+static unsigned int br_nf_pre_routing(void *priv,
 				      struct sk_buff *skb,
 				      const struct nf_hook_state *state)
 {
@@ -486,7 +485,7 @@
 			return NF_ACCEPT;
 
 		nf_bridge_pull_encap_header_rcsum(skb);
-		return br_nf_pre_routing_ipv6(ops, skb, state);
+		return br_nf_pre_routing_ipv6(priv, skb, state);
 	}
 
 	if (!brnf_call_iptables && !br->nf_call_iptables)
@@ -497,7 +496,7 @@
 
 	nf_bridge_pull_encap_header_rcsum(skb);
 
-	if (br_validate_ipv4(skb))
+	if (br_validate_ipv4(state->net, skb))
 		return NF_DROP;
 
 	nf_bridge_put(skb->nf_bridge);
@@ -526,7 +525,7 @@
  * took place when the packet entered the bridge), but we
  * register an IPv4 PRE_ROUTING 'sabotage' hook that will
  * prevent this from happening. */
-static unsigned int br_nf_local_in(const struct nf_hook_ops *ops,
+static unsigned int br_nf_local_in(void *priv,
 				   struct sk_buff *skb,
 				   const struct nf_hook_state *state)
 {
@@ -570,7 +569,7 @@
  * but we are still able to filter on the 'real' indev/outdev
  * because of the physdev module. For ARP, indev and outdev are the
  * bridge ports. */
-static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
+static unsigned int br_nf_forward_ip(void *priv,
 				     struct sk_buff *skb,
 				     const struct nf_hook_state *state)
 {
@@ -609,13 +608,13 @@
 	}
 
 	if (pf == NFPROTO_IPV4) {
-		if (br_validate_ipv4(skb))
+		if (br_validate_ipv4(state->net, skb))
 			return NF_DROP;
 		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
 	}
 
 	if (pf == NFPROTO_IPV6) {
-		if (br_validate_ipv6(skb))
+		if (br_validate_ipv6(state->net, skb))
 			return NF_DROP;
 		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
 	}
@@ -633,7 +632,7 @@
 	return NF_STOLEN;
 }
 
-static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
+static unsigned int br_nf_forward_arp(void *priv,
 				      struct sk_buff *skb,
 				      const struct nf_hook_state *state)
 {
@@ -692,17 +691,12 @@
 	nf_bridge_info_free(skb);
 	return br_dev_queue_push_xmit(net, sk, skb);
 }
-static int br_nf_push_frag_xmit_sk(struct sock *sk, struct sk_buff *skb)
-{
-	struct net *net = dev_net(skb_dst(skb)->dev);
-	return br_nf_push_frag_xmit(net, sk, skb);
-}
 #endif
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int
 br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
-		  int (*output)(struct sock *, struct sk_buff *))
+		  int (*output)(struct net *, struct sock *, struct sk_buff *))
 {
 	unsigned int mtu = ip_skb_dst_mtu(skb);
 	struct iphdr *iph = ip_hdr(skb);
@@ -715,7 +709,7 @@
 		return -EMSGSIZE;
 	}
 
-	return ip_do_fragment(sk, skb, output);
+	return ip_do_fragment(net, sk, skb, output);
 }
 #endif
 
@@ -747,7 +741,7 @@
 	if (skb->protocol == htons(ETH_P_IP)) {
 		struct brnf_frag_data *data;
 
-		if (br_validate_ipv4(skb))
+		if (br_validate_ipv4(net, skb))
 			goto drop;
 
 		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
@@ -764,7 +758,7 @@
 		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
 						 data->size);
 
-		return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit_sk);
+		return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
 	}
 #endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
@@ -772,7 +766,7 @@
 		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
 		struct brnf_frag_data *data;
 
-		if (br_validate_ipv6(skb))
+		if (br_validate_ipv6(net, skb))
 			goto drop;
 
 		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
@@ -787,7 +781,7 @@
 						 data->size);
 
 		if (v6ops)
-			return v6ops->fragment(sk, skb, br_nf_push_frag_xmit_sk);
+			return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
 
 		kfree_skb(skb);
 		return -EMSGSIZE;
@@ -801,7 +795,7 @@
 }
 
 /* PF_BRIDGE/POST_ROUTING ********************************************/
-static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
+static unsigned int br_nf_post_routing(void *priv,
 				       struct sk_buff *skb,
 				       const struct nf_hook_state *state)
 {
@@ -850,7 +844,7 @@
 /* IP/SABOTAGE *****************************************************/
 /* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
  * for the second time. */
-static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
+static unsigned int ip_sabotage_in(void *priv,
 				   struct sk_buff *skb,
 				   const struct nf_hook_state *state)
 {
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index e4dbbe4..d61f56e 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -100,10 +100,9 @@
 	return -1;
 }
 
-int br_validate_ipv6(struct sk_buff *skb)
+int br_validate_ipv6(struct net *net, struct sk_buff *skb)
 {
 	const struct ipv6hdr *hdr;
-	struct net_device *dev = skb->dev;
 	struct inet6_dev *idev = __in6_dev_get(skb->dev);
 	u32 pkt_len;
 	u8 ip6h_len = sizeof(struct ipv6hdr);
@@ -123,12 +122,12 @@
 
 	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
 		if (pkt_len + ip6h_len > skb->len) {
-			IP6_INC_STATS_BH(dev_net(dev), idev,
+			IP6_INC_STATS_BH(net, idev,
 					 IPSTATS_MIB_INTRUNCATEDPKTS);
 			goto drop;
 		}
 		if (pskb_trim_rcsum(skb, pkt_len + ip6h_len)) {
-			IP6_INC_STATS_BH(dev_net(dev), idev,
+			IP6_INC_STATS_BH(net, idev,
 					 IPSTATS_MIB_INDISCARDS);
 			goto drop;
 		}
@@ -143,7 +142,7 @@
 	return 0;
 
 inhdr_error:
-	IP6_INC_STATS_BH(dev_net(dev), idev, IPSTATS_MIB_INHDRERRORS);
+	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
 drop:
 	return -1;
 }
@@ -218,13 +217,13 @@
 /* Replicate the checks that IPv6 does on packet reception and pass the packet
  * to ip6tables.
  */
-unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
+unsigned int br_nf_pre_routing_ipv6(void *priv,
 				    struct sk_buff *skb,
 				    const struct nf_hook_state *state)
 {
 	struct nf_bridge_info *nf_bridge;
 
-	if (br_validate_ipv6(skb))
+	if (br_validate_ipv6(state->net, skb))
 		return NF_DROP;
 
 	nf_bridge_put(skb->nf_bridge);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index ea748c9..d792d1a 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -21,36 +21,35 @@
 #include "br_private.h"
 #include "br_private_stp.h"
 
-static int br_get_num_vlan_infos(const struct net_port_vlans *pv,
-				 u32 filter_mask)
+static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
+				u32 filter_mask)
 {
-	u16 vid_range_start = 0, vid_range_end = 0;
-	u16 vid_range_flags = 0;
-	u16 pvid, vid, flags;
+	struct net_bridge_vlan *v;
+	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
+	u16 flags, pvid;
 	int num_vlans = 0;
 
-	if (filter_mask & RTEXT_FILTER_BRVLAN)
-		return pv->num_vlans;
-
 	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
 		return 0;
 
-	/* Count number of vlan info's
-	 */
-	pvid = br_get_pvid(pv);
-	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+	pvid = br_get_pvid(vg);
+	/* Count number of vlan infos */
+	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
 		flags = 0;
-		if (vid == pvid)
+		/* only a context, bridge vlan not activated */
+		if (!br_vlan_should_use(v))
+			continue;
+		if (v->vid == pvid)
 			flags |= BRIDGE_VLAN_INFO_PVID;
 
-		if (test_bit(vid, pv->untagged_bitmap))
+		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 			flags |= BRIDGE_VLAN_INFO_UNTAGGED;
 
 		if (vid_range_start == 0) {
 			goto initvars;
-		} else if ((vid - vid_range_end) == 1 &&
+		} else if ((v->vid - vid_range_end) == 1 &&
 			flags == vid_range_flags) {
-			vid_range_end = vid;
+			vid_range_end = v->vid;
 			continue;
 		} else {
 			if ((vid_range_end - vid_range_start) > 0)
@@ -59,8 +58,8 @@
 				num_vlans += 1;
 		}
 initvars:
-		vid_range_start = vid;
-		vid_range_end = vid;
+		vid_range_start = v->vid;
+		vid_range_end = v->vid;
 		vid_range_flags = flags;
 	}
 
@@ -74,28 +73,43 @@
 	return num_vlans;
 }
 
+static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
+				 u32 filter_mask)
+{
+	int num_vlans;
+
+	if (!vg)
+		return 0;
+
+	if (filter_mask & RTEXT_FILTER_BRVLAN)
+		return vg->num_vlans;
+
+	rcu_read_lock();
+	num_vlans = __get_num_vlan_infos(vg, filter_mask);
+	rcu_read_unlock();
+
+	return num_vlans;
+}
+
 static size_t br_get_link_af_size_filtered(const struct net_device *dev,
 					   u32 filter_mask)
 {
-	struct net_port_vlans *pv;
+	struct net_bridge_vlan_group *vg = NULL;
+	struct net_bridge_port *p;
+	struct net_bridge *br;
 	int num_vlan_infos;
 
 	rcu_read_lock();
-	if (br_port_exists(dev))
-		pv = nbp_get_vlan_info(br_port_get_rcu(dev));
-	else if (dev->priv_flags & IFF_EBRIDGE)
-		pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
-	else
-		pv = NULL;
-	if (pv)
-		num_vlan_infos = br_get_num_vlan_infos(pv, filter_mask);
-	else
-		num_vlan_infos = 0;
+	if (br_port_exists(dev)) {
+		p = br_port_get_rcu(dev);
+		vg = nbp_vlan_group(p);
+	} else if (dev->priv_flags & IFF_EBRIDGE) {
+		br = netdev_priv(dev);
+		vg = br_vlan_group(br);
+	}
+	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
 	rcu_read_unlock();
 
-	if (!num_vlan_infos)
-		return 0;
-
 	/* Each VLAN is returned in bridge_vlan_info along with flags */
 	return num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
 }
@@ -113,6 +127,20 @@
 		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
 		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
 		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
+		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
+		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
+		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
+		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
+		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
+		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
+		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
+		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
+		+ nla_total_size(sizeof(u64))	/* IFLA_BRPORT_MESSAGE_AGE_TIMER */
+		+ nla_total_size(sizeof(u64))	/* IFLA_BRPORT_FORWARD_DELAY_TIMER */
+		+ nla_total_size(sizeof(u64))	/* IFLA_BRPORT_HOLD_TIMER */
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
+#endif
 		+ 0;
 }
 
@@ -134,6 +162,7 @@
 			      const struct net_bridge_port *p)
 {
 	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
+	u64 timerval;
 
 	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
 	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
@@ -146,9 +175,36 @@
 	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) ||
 	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
 	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
-		       !!(p->flags & BR_PROXYARP_WIFI)))
+		       !!(p->flags & BR_PROXYARP_WIFI)) ||
+	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
+		    &p->designated_root) ||
+	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
+		    &p->designated_bridge) ||
+	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
+	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
+	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
+	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
+	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
+		       p->topology_change_ack) ||
+	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending))
 		return -EMSGSIZE;
 
+	timerval = br_timer_value(&p->message_age_timer);
+	if (nla_put_u64(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval))
+		return -EMSGSIZE;
+	timerval = br_timer_value(&p->forward_delay_timer);
+	if (nla_put_u64(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval))
+		return -EMSGSIZE;
+	timerval = br_timer_value(&p->hold_timer);
+	if (nla_put_u64(skb, IFLA_BRPORT_HOLD_TIMER, timerval))
+		return -EMSGSIZE;
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
+		       p->multicast_router))
+		return -EMSGSIZE;
+#endif
+
 	return 0;
 }
 
@@ -185,31 +241,33 @@
 }
 
 static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
-					 const struct net_port_vlans *pv)
+					 struct net_bridge_vlan_group *vg)
 {
-	u16 vid_range_start = 0, vid_range_end = 0;
-	u16 vid_range_flags = 0;
-	u16 pvid, vid, flags;
+	struct net_bridge_vlan *v;
+	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
+	u16 flags, pvid;
 	int err = 0;
 
 	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
 	 * and mark vlan info with begin and end flags
 	 * if vlaninfo represents a range
 	 */
-	pvid = br_get_pvid(pv);
-	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+	pvid = br_get_pvid(vg);
+	list_for_each_entry(v, &vg->vlan_list, vlist) {
 		flags = 0;
-		if (vid == pvid)
+		if (!br_vlan_should_use(v))
+			continue;
+		if (v->vid == pvid)
 			flags |= BRIDGE_VLAN_INFO_PVID;
 
-		if (test_bit(vid, pv->untagged_bitmap))
+		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 			flags |= BRIDGE_VLAN_INFO_UNTAGGED;
 
 		if (vid_range_start == 0) {
 			goto initvars;
-		} else if ((vid - vid_range_end) == 1 &&
+		} else if ((v->vid - vid_range_end) == 1 &&
 			flags == vid_range_flags) {
-			vid_range_end = vid;
+			vid_range_end = v->vid;
 			continue;
 		} else {
 			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
@@ -220,8 +278,8 @@
 		}
 
 initvars:
-		vid_range_start = vid;
-		vid_range_end = vid;
+		vid_range_start = v->vid;
+		vid_range_end = v->vid;
 		vid_range_flags = flags;
 	}
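
A standalone userspace sketch (not part of the patch) of the range compression done by __get_num_vlan_infos() and br_fill_ifvlaninfo_compressed() above: consecutive VIDs carrying identical flags are folded into a single begin/end range instead of one bridge_vlan_info per VLAN. The VLAN data below is made up purely for illustration.

/* illustration only: mimics the begin/end range folding with made-up data */
#include <stdio.h>

struct vlan { unsigned short vid; unsigned short flags; };

int main(void)
{
	/* hypothetical sorted VLAN list: 10-12 with flags 1, 20 with flags 0, 21 with flags 1 */
	struct vlan vlans[] = {
		{ 10, 1 }, { 11, 1 }, { 12, 1 }, { 20, 0 }, { 21, 1 },
	};
	size_t i, n = sizeof(vlans) / sizeof(vlans[0]);
	unsigned short start = 0, end = 0, range_flags = 0;
	int ranges = 0;

	for (i = 0; i < n; i++) {
		if (start == 0) {
			/* first entry opens the first range */
		} else if (vlans[i].vid - end == 1 &&
			   vlans[i].flags == range_flags) {
			end = vlans[i].vid;	/* extend the current range */
			continue;
		} else {
			printf("range %u-%u flags %u\n", start, end, range_flags);
			ranges++;
		}
		start = end = vlans[i].vid;
		range_flags = vlans[i].flags;
	}
	if (start) {
		printf("range %u-%u flags %u\n", start, end, range_flags);
		ranges++;
	}
	printf("%d range(s) instead of %zu per-vlan entries\n", ranges, n);
	return 0;
}
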
 
@@ -238,19 +296,23 @@
 }
 
 static int br_fill_ifvlaninfo(struct sk_buff *skb,
-			      const struct net_port_vlans *pv)
+			      struct net_bridge_vlan_group *vg)
 {
 	struct bridge_vlan_info vinfo;
-	u16 pvid, vid;
+	struct net_bridge_vlan *v;
+	u16 pvid;
 
-	pvid = br_get_pvid(pv);
-	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-		vinfo.vid = vid;
+	pvid = br_get_pvid(vg);
+	list_for_each_entry(v, &vg->vlan_list, vlist) {
+		if (!br_vlan_should_use(v))
+			continue;
+
+		vinfo.vid = v->vid;
 		vinfo.flags = 0;
-		if (vid == pvid)
+		if (v->vid == pvid)
 			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
 
-		if (test_bit(vid, pv->untagged_bitmap))
+		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
 
 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
@@ -269,11 +331,11 @@
  * Contains port and master info as well as carrier and bridge state.
  */
 static int br_fill_ifinfo(struct sk_buff *skb,
-			  const struct net_bridge_port *port,
+			  struct net_bridge_port *port,
 			  u32 pid, u32 seq, int event, unsigned int flags,
 			  u32 filter_mask, const struct net_device *dev)
 {
-	const struct net_bridge *br;
+	struct net_bridge *br;
 	struct ifinfomsg *hdr;
 	struct nlmsghdr *nlh;
 	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
@@ -320,16 +382,16 @@
 	/* Check if  the VID information is requested */
 	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
 	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
-		const struct net_port_vlans *pv;
+		struct net_bridge_vlan_group *vg;
 		struct nlattr *af;
 		int err;
 
 		if (port)
-			pv = nbp_get_vlan_info(port);
+			vg = nbp_vlan_group(port);
 		else
-			pv = br_get_vlan_info(br);
+			vg = br_vlan_group(br);
 
-		if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
+		if (!vg || !vg->num_vlans)
 			goto done;
 
 		af = nla_nest_start(skb, IFLA_AF_SPEC);
@@ -337,9 +399,9 @@
 			goto nla_put_failure;
 
 		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
-			err = br_fill_ifvlaninfo_compressed(skb, pv);
+			err = br_fill_ifvlaninfo_compressed(skb, vg);
 		else
-			err = br_fill_ifvlaninfo(skb, pv);
+			err = br_fill_ifvlaninfo(skb, vg);
 		if (err)
 			goto nla_put_failure;
 		nla_nest_end(skb, af);
@@ -413,14 +475,14 @@
 	switch (cmd) {
 	case RTM_SETLINK:
 		if (p) {
+			/* if the MASTER flag is set this will act on the global
+			 * per-VLAN entry as well
+			 */
 			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
 			if (err)
 				break;
-
-			if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
-				err = br_vlan_add(p->br, vinfo->vid,
-						  vinfo->flags);
 		} else {
+			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
 			err = br_vlan_add(br, vinfo->vid, vinfo->flags);
 		}
 		break;
@@ -462,6 +524,9 @@
 			if (vinfo_start)
 				return -EINVAL;
 			vinfo_start = vinfo;
+			/* don't allow range of pvids */
+			if (vinfo_start->flags & BRIDGE_VLAN_INFO_PVID)
+				return -EINVAL;
 			continue;
 		}
 
@@ -507,6 +572,7 @@
 	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
 	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
 	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
+	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
 };
 
 /* Change the state of the port and notify spanning tree */
@@ -578,6 +644,18 @@
 			return err;
 	}
 
+	if (tb[IFLA_BRPORT_FLUSH])
+		br_fdb_delete_by_port(p->br, p, 0, 0);
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
+		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);
+
+		err = br_multicast_set_port_router(p, mcast_router);
+		if (err)
+			return err;
+	}
+#endif
 	br_port_flags_change(p, old_flags ^ p->flags);
 	return 0;
 }
@@ -744,6 +822,27 @@
 	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
 	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
 	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
+	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
+	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
+				 .len  = ETH_ALEN },
+	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
+	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
+	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
+	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
+	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
+	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
+	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
+	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
+	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
+	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
+	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
+	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
 };
 
 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -774,9 +873,9 @@
 	}
 
 	if (data[IFLA_BR_AGEING_TIME]) {
-		u32 ageing_time = nla_get_u32(data[IFLA_BR_AGEING_TIME]);
-
-		br->ageing_time = clock_t_to_jiffies(ageing_time);
+		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
+		if (err)
+			return err;
 	}
 
 	if (data[IFLA_BR_STP_STATE]) {
@@ -807,6 +906,158 @@
 		if (err)
 			return err;
 	}
+
+	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
+		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
+
+		err = __br_vlan_set_default_pvid(br, defpvid);
+		if (err)
+			return err;
+	}
+#endif
+
+	if (data[IFLA_BR_GROUP_FWD_MASK]) {
+		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);
+
+		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
+			return -EINVAL;
+		br->group_fwd_mask = fwd_mask;
+	}
+
+	if (data[IFLA_BR_GROUP_ADDR]) {
+		u8 new_addr[ETH_ALEN];
+
+		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
+			return -EINVAL;
+		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
+		if (!is_link_local_ether_addr(new_addr))
+			return -EINVAL;
+		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
+		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
+		    new_addr[5] == 3)		/* 802.1X PAE address */
+			return -EINVAL;
+		spin_lock_bh(&br->lock);
+		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
+		spin_unlock_bh(&br->lock);
+		br->group_addr_set = true;
+		br_recalculate_fwd_mask(br);
+	}
+
+	if (data[IFLA_BR_FDB_FLUSH])
+		br_fdb_flush(br);
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	if (data[IFLA_BR_MCAST_ROUTER]) {
+		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);
+
+		err = br_multicast_set_router(br, multicast_router);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BR_MCAST_SNOOPING]) {
+		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
+
+		err = br_multicast_toggle(br, mcast_snooping);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
+		u8 val;
+
+		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
+		br->multicast_query_use_ifaddr = !!val;
+	}
+
+	if (data[IFLA_BR_MCAST_QUERIER]) {
+		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);
+
+		err = br_multicast_set_querier(br, mcast_querier);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
+		u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);
+
+		br->hash_elasticity = val;
+	}
+
+	if (data[IFLA_BR_MCAST_HASH_MAX]) {
+		u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);
+
+		err = br_multicast_set_hash_max(br, hash_max);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
+		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);
+
+		br->multicast_last_member_count = val;
+	}
+
+	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
+		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);
+
+		br->multicast_startup_query_count = val;
+	}
+
+	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);
+
+		br->multicast_last_member_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);
+
+		br->multicast_membership_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);
+
+		br->multicast_querier_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
+
+		br->multicast_query_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);
+
+		br->multicast_query_response_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
+
+		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
+	}
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
+		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);
+
+		br->nf_call_iptables = val ? true : false;
+	}
+
+	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
+		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);
+
+		br->nf_call_ip6tables = val ? true : false;
+	}
+
+	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
+		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);
+
+		br->nf_call_arptables = val ? true : false;
+	}
 #endif
 
 	return 0;
@@ -823,6 +1074,40 @@
 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_FILTERING */
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
 	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
+	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_VLAN_DEFAULT_PVID */
+#endif
+	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_GROUP_FWD_MASK */
+	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_ROOT_ID */
+	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_BRIDGE_ID */
+	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_ROOT_PORT */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_ROOT_PATH_COST */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_HELLO_TIMER */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_TCN_TIMER */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_GC_TIMER */
+	       nla_total_size(ETH_ALEN) +       /* IFLA_BR_GROUP_ADDR */
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_ROUTER */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_SNOOPING */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERIER */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_ELASTICITY */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_MAX */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERIER_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERY_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IPTABLES */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IP6TABLES */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_ARPTABLES */
 #endif
 	       0;
 }
@@ -837,6 +1122,20 @@
 	u32 stp_enabled = br->stp_enabled;
 	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
 	u8 vlan_enabled = br_vlan_enabled(br);
+	u64 clockval;
+
+	clockval = br_timer_value(&br->hello_timer);
+	if (nla_put_u64(skb, IFLA_BR_HELLO_TIMER, clockval))
+		return -EMSGSIZE;
+	clockval = br_timer_value(&br->tcn_timer);
+	if (nla_put_u64(skb, IFLA_BR_TCN_TIMER, clockval))
+		return -EMSGSIZE;
+	clockval = br_timer_value(&br->topology_change_timer);
+	if (nla_put_u64(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval))
+		return -EMSGSIZE;
+	clockval = br_timer_value(&br->gc_timer);
+	if (nla_put_u64(skb, IFLA_BR_GC_TIMER, clockval))
+		return -EMSGSIZE;
 
 	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
 	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
@@ -844,11 +1143,66 @@
 	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
 	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
 	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
-	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled))
+	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
+	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
+	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
+		    &br->bridge_id) ||
+	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
+		    &br->designated_root) ||
+	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
+	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
+	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
+	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
+		       br->topology_change_detected) ||
+	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
 		return -EMSGSIZE;
 
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto))
+	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
+	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid))
+		return -EMSGSIZE;
+#endif
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
+	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
+	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
+		       br->multicast_query_use_ifaddr) ||
+	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
+	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
+			br->hash_elasticity) ||
+	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
+	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
+			br->multicast_last_member_count) ||
+	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
+			br->multicast_startup_query_count))
+		return -EMSGSIZE;
+
+	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_query_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval))
+		return -EMSGSIZE;
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
+		       br->nf_call_iptables ? 1 : 0) ||
+	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
+		       br->nf_call_ip6tables ? 1 : 0) ||
+	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
+		       br->nf_call_arptables ? 1 : 0))
 		return -EMSGSIZE;
 #endif
 
@@ -857,20 +1211,22 @@
 
 static size_t br_get_link_af_size(const struct net_device *dev)
 {
-	struct net_port_vlans *pv;
+	struct net_bridge_port *p;
+	struct net_bridge *br;
+	int num_vlans = 0;
 
-	if (br_port_exists(dev))
-		pv = nbp_get_vlan_info(br_port_get_rtnl(dev));
-	else if (dev->priv_flags & IFF_EBRIDGE)
-		pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
-	else
-		return 0;
-
-	if (!pv)
-		return 0;
+	if (br_port_exists(dev)) {
+		p = br_port_get_rtnl(dev);
+		num_vlans = br_get_num_vlan_infos(nbp_vlan_group(p),
+						  RTEXT_FILTER_BRVLAN);
+	} else if (dev->priv_flags & IFF_EBRIDGE) {
+		br = netdev_priv(dev);
+		num_vlans = br_get_num_vlan_infos(br_vlan_group(br),
+						  RTEXT_FILTER_BRVLAN);
+	}
 
 	/* Each VLAN is returned in bridge_vlan_info along with flags */
-	return pv->num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
+	return num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
 }
 
 static struct rtnl_af_ops br_af_ops __read_mostly = {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 74e99c7..ba0c67b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -20,6 +20,7 @@
 #include <net/route.h>
 #include <net/ip6_fib.h>
 #include <linux/if_vlan.h>
+#include <linux/rhashtable.h>
 
 #define BR_HASH_BITS 8
 #define BR_HASH_SIZE (1 << BR_HASH_BITS)
@@ -28,7 +29,6 @@
 
 #define BR_PORT_BITS	10
 #define BR_MAX_PORTS	(1<<BR_PORT_BITS)
-#define BR_VLAN_BITMAP_LEN	BITS_TO_LONGS(VLAN_N_VID)
 
 #define BR_VERSION	"2.3"
 
@@ -77,17 +77,61 @@
 };
 #endif
 
-struct net_port_vlans {
-	u16				port_idx;
-	u16				pvid;
+/**
+ * struct net_bridge_vlan - per-vlan entry
+ *
+ * @vnode: rhashtable member
+ * @vid: VLAN id
+ * @flags: bridge vlan flags
+ * @br: if MASTER flag set, this points to a bridge struct
+ * @port: if MASTER flag unset, this points to a port struct
+ * @refcnt: if MASTER flag set, this is bumped for each port referencing it
+ * @brvlan: if MASTER flag unset, this points to the global per-VLAN context
+ *          for this VLAN entry
+ * @vlist: sorted list of VLAN entries
+ * @rcu: used for entry destruction
+ *
+ * This structure is shared between the global per-VLAN entries contained in
+ * the bridge rhashtable and the local per-port per-VLAN entries contained in
+ * the port's rhashtable. The union entries should be interpreted depending on
+ * the entry flags that are set.
+ */
+struct net_bridge_vlan {
+	struct rhash_head		vnode;
+	u16				vid;
+	u16				flags;
 	union {
-		struct net_bridge_port		*port;
-		struct net_bridge		*br;
-	}				parent;
+		struct net_bridge	*br;
+		struct net_bridge_port	*port;
+	};
+	union {
+		atomic_t		refcnt;
+		struct net_bridge_vlan	*brvlan;
+	};
+	struct list_head		vlist;
+
 	struct rcu_head			rcu;
-	unsigned long			vlan_bitmap[BR_VLAN_BITMAP_LEN];
-	unsigned long			untagged_bitmap[BR_VLAN_BITMAP_LEN];
+};
+
+/**
+ * struct net_bridge_vlan_group
+ *
+ * @vlan_hash: VLAN entry rhashtable
+ * @vlan_list: sorted VLAN entry list
+ * @num_vlans: number of total VLAN entries
+ * @pvid: PVID VLAN id
+ *
+ * IMPORTANT: Be careful when checking if there're VLAN entries using list
+ *            primitives because the bridge can have entries in its list which
+ *            are just for global context but not for filtering, i.e. they have
+ *            the master flag set but not the brentry flag. If you have to check
+ *            if there're "real" entries in the bridge please test @num_vlans
+ */
+struct net_bridge_vlan_group {
+	struct rhashtable		vlan_hash;
+	struct list_head		vlan_list;
 	u16				num_vlans;
+	u16				pvid;
 };
 
 struct net_bridge_fdb_entry
@@ -185,7 +229,7 @@
 	struct netpoll			*np;
 #endif
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-	struct net_port_vlans __rcu	*vlan_info;
+	struct net_bridge_vlan_group	*vlgrp;
 #endif
 };
 
@@ -293,10 +337,10 @@
 	struct kobject			*ifobj;
 	u32				auto_cnt;
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	struct net_bridge_vlan_group	*vlgrp;
 	u8				vlan_enabled;
 	__be16				vlan_proto;
 	u16				default_pvid;
-	struct net_port_vlans __rcu	*vlan_info;
 #endif
 };
 
@@ -344,6 +388,31 @@
 	return !memcmp(&br->bridge_id, &br->designated_root, 8);
 }
 
+/* check if a VLAN entry is global */
+static inline bool br_vlan_is_master(const struct net_bridge_vlan *v)
+{
+	return v->flags & BRIDGE_VLAN_INFO_MASTER;
+}
+
+/* check if a VLAN entry is used by the bridge */
+static inline bool br_vlan_is_brentry(const struct net_bridge_vlan *v)
+{
+	return v->flags & BRIDGE_VLAN_INFO_BRENTRY;
+}
+
+/* check if we should use the vlan entry, returns false if it's only context */
+static inline bool br_vlan_should_use(const struct net_bridge_vlan *v)
+{
+	if (br_vlan_is_master(v)) {
+		if (br_vlan_is_brentry(v))
+			return true;
+		else
+			return false;
+	}
+
+	return true;
+}
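
A minimal userspace sketch of the semantics behind br_vlan_is_master() and br_vlan_should_use() above. The flag values are assumed to match include/uapi/linux/if_bridge.h at the time of this change; the program is illustration only, not kernel code.

/* illustration only: flag values assumed from the if_bridge UAPI header */
#include <stdbool.h>
#include <stdio.h>

#define BRIDGE_VLAN_INFO_MASTER  (1 << 0)	/* global (bridge-level) entry */
#define BRIDGE_VLAN_INFO_BRENTRY (1 << 5)	/* bridge device itself is a member */

static bool vlan_is_master(unsigned short flags)
{
	return flags & BRIDGE_VLAN_INFO_MASTER;
}

/* mirrors the intent of br_vlan_should_use(): a master entry only "counts"
 * if the bridge device itself is a member (BRENTRY); otherwise it is pure
 * context kept alive for the ports that reference it.
 */
static bool vlan_should_use(unsigned short flags)
{
	if (vlan_is_master(flags))
		return flags & BRIDGE_VLAN_INFO_BRENTRY;
	return true;	/* per-port entries are always usable */
}

int main(void)
{
	unsigned short cases[] = {
		0,						/* port entry */
		BRIDGE_VLAN_INFO_MASTER,			/* context only */
		BRIDGE_VLAN_INFO_MASTER | BRIDGE_VLAN_INFO_BRENTRY, /* bridge entry */
	};
	for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("flags=0x%02x master=%d should_use=%d\n",
		       cases[i], vlan_is_master(cases[i]),
		       vlan_should_use(cases[i]));
	return 0;
}
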
+
 /* br_device.c */
 void br_dev_setup(struct net_device *dev);
 void br_dev_delete(struct net_device *dev, struct list_head *list);
@@ -601,18 +670,19 @@
 
 /* br_vlan.c */
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
-			struct sk_buff *skb, u16 *vid);
-bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v,
+bool br_allowed_ingress(const struct net_bridge *br,
+			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
+			u16 *vid);
+bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 		       const struct sk_buff *skb);
 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
 struct sk_buff *br_handle_vlan(struct net_bridge *br,
-			       const struct net_port_vlans *v,
+			       struct net_bridge_vlan_group *vg,
 			       struct sk_buff *skb);
 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
 int br_vlan_delete(struct net_bridge *br, u16 vid);
 void br_vlan_flush(struct net_bridge *br);
-bool br_vlan_find(struct net_bridge *br, u16 vid);
+struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid);
 void br_recalculate_fwd_mask(struct net_bridge *br);
 int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
@@ -620,22 +690,23 @@
 int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
 int br_vlan_init(struct net_bridge *br);
 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
+int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid);
 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
 void nbp_vlan_flush(struct net_bridge_port *port);
-bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
 int nbp_vlan_init(struct net_bridge_port *port);
+int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
 
-static inline struct net_port_vlans *br_get_vlan_info(
-						const struct net_bridge *br)
+static inline struct net_bridge_vlan_group *br_vlan_group(
+					const struct net_bridge *br)
 {
-	return rcu_dereference_rtnl(br->vlan_info);
+	return br->vlgrp;
 }
 
-static inline struct net_port_vlans *nbp_get_vlan_info(
-						const struct net_bridge_port *p)
+static inline struct net_bridge_vlan_group *nbp_vlan_group(
+					const struct net_bridge_port *p)
 {
-	return rcu_dereference_rtnl(p->vlan_info);
+	return p->vlgrp;
 }
 
 /* Since bridge now depends on 8021Q module, but the time bridge sees the
@@ -645,9 +716,9 @@
 {
 	int err = 0;
 
-	if (skb_vlan_tag_present(skb))
+	if (skb_vlan_tag_present(skb)) {
 		*vid = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
-	else {
+	} else {
 		*vid = 0;
 		err = -EINVAL;
 	}
@@ -655,13 +726,13 @@
 	return err;
 }
 
-static inline u16 br_get_pvid(const struct net_port_vlans *v)
+static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg)
 {
-	if (!v)
+	if (!vg)
 		return 0;
 
 	smp_rmb();
-	return v->pvid;
+	return vg->pvid;
 }
 
 static inline int br_vlan_enabled(struct net_bridge *br)
@@ -669,16 +740,15 @@
 	return br->vlan_enabled;
 }
 #else
-static inline bool br_allowed_ingress(struct net_bridge *br,
-				      struct net_port_vlans *v,
+static inline bool br_allowed_ingress(const struct net_bridge *br,
+				      struct net_bridge_vlan_group *vg,
 				      struct sk_buff *skb,
 				      u16 *vid)
 {
 	return true;
 }
 
-static inline bool br_allowed_egress(struct net_bridge *br,
-				     const struct net_port_vlans *v,
+static inline bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 				     const struct sk_buff *skb)
 {
 	return true;
@@ -691,7 +761,7 @@
 }
 
 static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
-					     const struct net_port_vlans *v,
+					     struct net_bridge_vlan_group *vg,
 					     struct sk_buff *skb)
 {
 	return skb;
@@ -711,11 +781,6 @@
 {
 }
 
-static inline bool br_vlan_find(struct net_bridge *br, u16 vid)
-{
-	return false;
-}
-
 static inline void br_recalculate_fwd_mask(struct net_bridge *br)
 {
 }
@@ -739,21 +804,11 @@
 {
 }
 
-static inline struct net_port_vlans *br_get_vlan_info(
-						const struct net_bridge *br)
+static inline struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg,
+						   u16 vid)
 {
 	return NULL;
 }
-static inline struct net_port_vlans *nbp_get_vlan_info(
-						const struct net_bridge_port *p)
-{
-	return NULL;
-}
-
-static inline bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
-{
-	return false;
-}
 
 static inline int nbp_vlan_init(struct net_bridge_port *port)
 {
@@ -764,7 +819,8 @@
 {
 	return 0;
 }
-static inline u16 br_get_pvid(const struct net_port_vlans *v)
+
+static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg)
 {
 	return 0;
 }
@@ -779,6 +835,24 @@
 {
 	return -EOPNOTSUPP;
 }
+
+static inline int nbp_get_num_vlan_infos(struct net_bridge_port *p,
+					 u32 filter_mask)
+{
+	return 0;
+}
+
+static inline struct net_bridge_vlan_group *br_vlan_group(
+					const struct net_bridge *br)
+{
+	return NULL;
+}
+
+static inline struct net_bridge_vlan_group *nbp_vlan_group(
+					const struct net_bridge_port *p)
+{
+	return NULL;
+}
 #endif
 
 struct nf_br_ops {
@@ -808,6 +882,7 @@
 int br_set_forward_delay(struct net_bridge *br, unsigned long x);
 int br_set_hello_time(struct net_bridge *br, unsigned long x);
 int br_set_max_age(struct net_bridge *br, unsigned long x);
+int br_set_ageing_time(struct net_bridge *br, u32 ageing_time);
 
 
 /* br_stp_if.c */
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index ed74ffa..db6d243de 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -40,7 +40,7 @@
 void br_set_state(struct net_bridge_port *p, unsigned int state)
 {
 	struct switchdev_attr attr = {
-		.id = SWITCHDEV_ATTR_PORT_STP_STATE,
+		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 		.u.stp_state = state,
 	};
 	int err;
@@ -566,6 +566,29 @@
 
 }
 
+int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
+{
+	struct switchdev_attr attr = {
+		.id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
+		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
+		.u.ageing_time = ageing_time,
+	};
+	unsigned long t = clock_t_to_jiffies(ageing_time);
+	int err;
+
+	if (t < BR_MIN_AGEING_TIME || t > BR_MAX_AGEING_TIME)
+		return -ERANGE;
+
+	err = switchdev_port_attr_set(br->dev, &attr);
+	if (err)
+		return err;
+
+	br->ageing_time = t;
+	mod_timer(&br->gc_timer, jiffies);
+
+	return 0;
+}
+
 void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
 {
 	br->bridge_forward_delay = t;
@@ -576,17 +599,12 @@
 int br_set_forward_delay(struct net_bridge *br, unsigned long val)
 {
 	unsigned long t = clock_t_to_jiffies(val);
-	int err = -ERANGE;
+
+	if (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY)
+		return -ERANGE;
 
 	spin_lock_bh(&br->lock);
-	if (br->stp_enabled != BR_NO_STP &&
-	    (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
-		goto unlock;
-
 	__br_set_forward_delay(br, t);
-	err = 0;
-
-unlock:
 	spin_unlock_bh(&br->lock);
-	return err;
+	return 0;
 }
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 4c97fc5..04ef192 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -102,8 +102,7 @@
 
 static int set_ageing_time(struct net_bridge *br, unsigned long val)
 {
-	br->ageing_time = clock_t_to_jiffies(val);
-	return 0;
+	return br_set_ageing_time(br, val);
 }
 
 static ssize_t ageing_time_store(struct device *d,
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 5f5a02b..ad7e4f6 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -6,86 +6,195 @@
 
 #include "br_private.h"
 
-static void __vlan_add_pvid(struct net_port_vlans *v, u16 vid)
+static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
+			      const void *ptr)
 {
-	if (v->pvid == vid)
+	const struct net_bridge_vlan *vle = ptr;
+	u16 vid = *(u16 *)arg->key;
+
+	return vle->vid != vid;
+}
+
+static const struct rhashtable_params br_vlan_rht_params = {
+	.head_offset = offsetof(struct net_bridge_vlan, vnode),
+	.key_offset = offsetof(struct net_bridge_vlan, vid),
+	.key_len = sizeof(u16),
+	.nelem_hint = 3,
+	.locks_mul = 1,
+	.max_size = VLAN_N_VID,
+	.obj_cmpfn = br_vlan_cmp,
+	.automatic_shrinking = true,
+};
+
+static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
+{
+	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
+}
+
+static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
+{
+	if (vg->pvid == vid)
 		return;
 
 	smp_wmb();
-	v->pvid = vid;
+	vg->pvid = vid;
 }
 
-static void __vlan_delete_pvid(struct net_port_vlans *v, u16 vid)
+static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 {
-	if (v->pvid != vid)
+	if (vg->pvid != vid)
 		return;
 
 	smp_wmb();
-	v->pvid = 0;
+	vg->pvid = 0;
 }
 
-static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
+static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
 {
-	if (flags & BRIDGE_VLAN_INFO_PVID)
-		__vlan_add_pvid(v, vid);
+	struct net_bridge_vlan_group *vg;
+
+	if (br_vlan_is_master(v))
+		vg = v->br->vlgrp;
 	else
-		__vlan_delete_pvid(v, vid);
+		vg = v->port->vlgrp;
+
+	if (flags & BRIDGE_VLAN_INFO_PVID)
+		__vlan_add_pvid(vg, v->vid);
+	else
+		__vlan_delete_pvid(vg, v->vid);
 
 	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
-		set_bit(vid, v->untagged_bitmap);
+		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
 	else
-		clear_bit(vid, v->untagged_bitmap);
+		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
 }
 
 static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
 			  u16 vid, u16 flags)
 {
-	const struct net_device_ops *ops = dev->netdev_ops;
+	struct switchdev_obj_port_vlan v = {
+		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+		.flags = flags,
+		.vid_begin = vid,
+		.vid_end = vid,
+	};
 	int err;
 
-	/* If driver uses VLAN ndo ops, use 8021q to install vid
-	 * on device, otherwise try switchdev ops to install vid.
+	/* Try switchdev op first. In case it is not supported, fallback to
+	 * 8021q add.
 	 */
-
-	if (ops->ndo_vlan_rx_add_vid) {
-		err = vlan_vid_add(dev, br->vlan_proto, vid);
-	} else {
-		struct switchdev_obj vlan_obj = {
-			.id = SWITCHDEV_OBJ_PORT_VLAN,
-			.u.vlan = {
-				.flags = flags,
-				.vid_begin = vid,
-				.vid_end = vid,
-			},
-		};
-
-		err = switchdev_port_obj_add(dev, &vlan_obj);
-		if (err == -EOPNOTSUPP)
-			err = 0;
-	}
-
+	err = switchdev_port_obj_add(dev, &v.obj);
+	if (err == -EOPNOTSUPP)
+		return vlan_vid_add(dev, br->vlan_proto, vid);
 	return err;
 }
 
-static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
+static void __vlan_add_list(struct net_bridge_vlan *v)
 {
-	struct net_bridge_port *p = NULL;
-	struct net_bridge *br;
-	struct net_device *dev;
+	struct list_head *headp, *hpos;
+	struct net_bridge_vlan *vent;
+
+	headp = br_vlan_is_master(v) ? &v->br->vlgrp->vlan_list :
+				       &v->port->vlgrp->vlan_list;
+	list_for_each_prev(hpos, headp) {
+		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
+		if (v->vid < vent->vid)
+			continue;
+		else
+			break;
+	}
+	list_add_rcu(&v->vlist, hpos);
+}
+
+static void __vlan_del_list(struct net_bridge_vlan *v)
+{
+	list_del_rcu(&v->vlist);
+}
+
+static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
+			  u16 vid)
+{
+	struct switchdev_obj_port_vlan v = {
+		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+		.vid_begin = vid,
+		.vid_end = vid,
+	};
 	int err;
 
-	if (test_bit(vid, v->vlan_bitmap)) {
-		__vlan_add_flags(v, vid, flags);
+	/* Try switchdev op first. In case it is not supported, fallback to
+	 * 8021q del.
+	 */
+	err = switchdev_port_obj_del(dev, &v.obj);
+	if (err == -EOPNOTSUPP) {
+		vlan_vid_del(dev, br->vlan_proto, vid);
 		return 0;
 	}
+	return err;
+}
 
-	if (v->port_idx) {
-		p = v->parent.port;
+/* Returns a master vlan, if it didn't exist it gets created. In all cases a
+ * reference is taken to the master vlan before returning.
+ */
+static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
+{
+	struct net_bridge_vlan *masterv;
+
+	masterv = br_vlan_find(br->vlgrp, vid);
+	if (!masterv) {
+		/* missing global ctx, create it now */
+		if (br_vlan_add(br, vid, 0))
+			return NULL;
+		masterv = br_vlan_find(br->vlgrp, vid);
+		if (WARN_ON(!masterv))
+			return NULL;
+	}
+	atomic_inc(&masterv->refcnt);
+
+	return masterv;
+}
+
+static void br_vlan_put_master(struct net_bridge_vlan *masterv)
+{
+	if (!br_vlan_is_master(masterv))
+		return;
+
+	if (atomic_dec_and_test(&masterv->refcnt)) {
+		rhashtable_remove_fast(&masterv->br->vlgrp->vlan_hash,
+				       &masterv->vnode, br_vlan_rht_params);
+		__vlan_del_list(masterv);
+		kfree_rcu(masterv, rcu);
+	}
+}
+
+/* This is the shared VLAN add function which works for both ports and bridge
+ * devices. There are four possible calls to this function in terms of the
+ * vlan entry type:
+ * 1. vlan is being added on a port (no master flags, global entry exists)
+ * 2. vlan is being added on a bridge (both master and brvlan flags)
+ * 3. vlan is being added on a port, but a global entry didn't exist which
+ *    is being created right now (master flag set, brvlan flag unset), the
+ *    global entry is used for global per-vlan features, but not for filtering
+ * 4. same as 3 but with both master and brvlan flags set so the entry
+ *    will be used for filtering in both the port and the bridge
+ */
+static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
+{
+	struct net_bridge_vlan *masterv = NULL;
+	struct net_bridge_port *p = NULL;
+	struct net_bridge_vlan_group *vg;
+	struct net_device *dev;
+	struct net_bridge *br;
+	int err;
+
+	if (br_vlan_is_master(v)) {
+		br = v->br;
+		dev = br->dev;
+		vg = br->vlgrp;
+	} else {
+		p = v->port;
 		br = p->br;
 		dev = p->dev;
-	} else {
-		br = v->parent.br;
-		dev = br->dev;
+		vg = p->vlgrp;
 	}
 
 	if (p) {
@@ -93,116 +202,135 @@
 		 * This ensures tagged traffic enters the bridge when
 		 * promiscuous mode is disabled by br_manage_promisc().
 		 */
-		err = __vlan_vid_add(dev, br, vid, flags);
+		err = __vlan_vid_add(dev, br, v->vid, flags);
 		if (err)
-			return err;
+			goto out;
+
+		/* need to work on the master vlan too */
+		if (flags & BRIDGE_VLAN_INFO_MASTER) {
+			err = br_vlan_add(br, v->vid, flags |
+						      BRIDGE_VLAN_INFO_BRENTRY);
+			if (err)
+				goto out_filt;
+		}
+
+		masterv = br_vlan_get_master(br, v->vid);
+		if (!masterv)
+			goto out_filt;
+		v->brvlan = masterv;
 	}
 
-	err = br_fdb_insert(br, p, dev->dev_addr, vid);
-	if (err) {
-		br_err(br, "failed insert local address into bridge "
-		       "forwarding table\n");
-		goto out_filt;
+	/* Add the dev mac and count the vlan only if it's usable */
+	if (br_vlan_should_use(v)) {
+		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
+		if (err) {
+			br_err(br, "failed insert local address into bridge forwarding table\n");
+			goto out_filt;
+		}
+		vg->num_vlans++;
 	}
 
-	set_bit(vid, v->vlan_bitmap);
-	v->num_vlans++;
-	__vlan_add_flags(v, vid, flags);
+	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
+					    br_vlan_rht_params);
+	if (err)
+		goto out_fdb_insert;
 
-	return 0;
+	__vlan_add_list(v);
+	__vlan_add_flags(v, flags);
+out:
+	return err;
+
+out_fdb_insert:
+	if (br_vlan_should_use(v)) {
+		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
+		vg->num_vlans--;
+	}
 
 out_filt:
-	if (p)
-		vlan_vid_del(dev, br->vlan_proto, vid);
-	return err;
+	if (p) {
+		__vlan_vid_del(dev, br, v->vid);
+		if (masterv) {
+			br_vlan_put_master(masterv);
+			v->brvlan = NULL;
+		}
+	}
+
+	goto out;
 }
 
-static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
-			  u16 vid)
+static int __vlan_del(struct net_bridge_vlan *v)
 {
-	const struct net_device_ops *ops = dev->netdev_ops;
+	struct net_bridge_vlan *masterv = v;
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p = NULL;
 	int err = 0;
 
-	/* If driver uses VLAN ndo ops, use 8021q to delete vid
-	 * on device, otherwise try switchdev ops to delete vid.
-	 */
-
-	if (ops->ndo_vlan_rx_kill_vid) {
-		vlan_vid_del(dev, br->vlan_proto, vid);
+	if (br_vlan_is_master(v)) {
+		vg = v->br->vlgrp;
 	} else {
-		struct switchdev_obj vlan_obj = {
-			.id = SWITCHDEV_OBJ_PORT_VLAN,
-			.u.vlan = {
-				.vid_begin = vid,
-				.vid_end = vid,
-			},
-		};
-
-		err = switchdev_port_obj_del(dev, &vlan_obj);
-		if (err == -EOPNOTSUPP)
-			err = 0;
+		p = v->port;
+		vg = v->port->vlgrp;
+		masterv = v->brvlan;
 	}
 
+	__vlan_delete_pvid(vg, v->vid);
+	if (p) {
+		err = __vlan_vid_del(p->dev, p->br, v->vid);
+		if (err)
+			goto out;
+	}
+
+	if (br_vlan_should_use(v)) {
+		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
+		vg->num_vlans--;
+	}
+
+	if (masterv != v) {
+		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
+				       br_vlan_rht_params);
+		__vlan_del_list(v);
+		kfree_rcu(v, rcu);
+	}
+
+	br_vlan_put_master(masterv);
+out:
 	return err;
 }
 
-static int __vlan_del(struct net_port_vlans *v, u16 vid)
+static void __vlan_flush(struct net_bridge_vlan_group *vlgrp)
 {
-	if (!test_bit(vid, v->vlan_bitmap))
-		return -EINVAL;
+	struct net_bridge_vlan *vlan, *tmp;
 
-	__vlan_delete_pvid(v, vid);
-	clear_bit(vid, v->untagged_bitmap);
-
-	if (v->port_idx) {
-		struct net_bridge_port *p = v->parent.port;
-		int err;
-
-		err = __vlan_vid_del(p->dev, p->br, vid);
-		if (err)
-			return err;
-	}
-
-	clear_bit(vid, v->vlan_bitmap);
-	v->num_vlans--;
-	if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
-		if (v->port_idx)
-			RCU_INIT_POINTER(v->parent.port->vlan_info, NULL);
-		else
-			RCU_INIT_POINTER(v->parent.br->vlan_info, NULL);
-		kfree_rcu(v, rcu);
-	}
-	return 0;
-}
-
-static void __vlan_flush(struct net_port_vlans *v)
-{
-	smp_wmb();
-	v->pvid = 0;
-	bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
-	if (v->port_idx)
-		RCU_INIT_POINTER(v->parent.port->vlan_info, NULL);
-	else
-		RCU_INIT_POINTER(v->parent.br->vlan_info, NULL);
-	kfree_rcu(v, rcu);
+	__vlan_delete_pvid(vlgrp, vlgrp->pvid);
+	list_for_each_entry_safe(vlan, tmp, &vlgrp->vlan_list, vlist)
+		__vlan_del(vlan);
+	rhashtable_destroy(&vlgrp->vlan_hash);
+	kfree(vlgrp);
 }
 
 struct sk_buff *br_handle_vlan(struct net_bridge *br,
-			       const struct net_port_vlans *pv,
+			       struct net_bridge_vlan_group *vg,
 			       struct sk_buff *skb)
 {
+	struct net_bridge_vlan *v;
 	u16 vid;
 
 	/* If this packet was not filtered at input, let it pass */
 	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 		goto out;
 
-	/* Vlan filter table must be configured at this point.  The
+	/* At this point, we know that the frame was filtered and contains
+	 * a valid vlan id.  If the vlan id has the untagged flag set,
+	 * send untagged; otherwise, send tagged.
+	 */
+	br_vlan_get_tag(skb, &vid);
+	v = br_vlan_find(vg, vid);
+	/* Vlan entry must be configured at this point.  The
 	 * only exception is the bridge is set in promisc mode and the
 	 * packet is destined for the bridge device.  In this case
 	 * pass the packet as is.
 	 */
-	if (!pv) {
+	if (!v || !br_vlan_should_use(v)) {
 		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
 			goto out;
 		} else {
@@ -210,13 +338,7 @@
 			return NULL;
 		}
 	}
-
-	/* At this point, we know that the frame was filtered and contains
-	 * a valid vlan id.  If the vlan id is set in the untagged bitmap,
-	 * send untagged; otherwise, send tagged.
-	 */
-	br_vlan_get_tag(skb, &vid);
-	if (test_bit(vid, pv->untagged_bitmap))
+	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 		skb->vlan_tci = 0;
 
 out:
@@ -224,29 +346,13 @@
 }
 
 /* Called under RCU */
-bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
-			struct sk_buff *skb, u16 *vid)
+static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
+			      struct sk_buff *skb, u16 *vid)
 {
+	const struct net_bridge_vlan *v;
 	bool tagged;
-	__be16 proto;
-
-	/* If VLAN filtering is disabled on the bridge, all packets are
-	 * permitted.
-	 */
-	if (!br->vlan_enabled) {
-		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
-		return true;
-	}
-
-	/* If there are no vlan in the permitted list, all packets are
-	 * rejected.
-	 */
-	if (!v)
-		goto drop;
 
 	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
-	proto = br->vlan_proto;
-
 	/* If vlan tx offload is disabled on bridge device and frame was
 	 * sent from vlan device on the bridge device, it does not have
 	 * HW accelerated vlan tag.
@@ -281,7 +387,7 @@
 	}
 
 	if (!*vid) {
-		u16 pvid = br_get_pvid(v);
+		u16 pvid = br_get_pvid(vg);
 
 		/* Frame had a tag with VID 0 or did not have a tag.
 		 * See if pvid is set on this port.  That tells us which
@@ -309,29 +415,43 @@
 	}
 
 	/* Frame had a valid vlan tag.  See if vlan is allowed */
-	if (test_bit(*vid, v->vlan_bitmap))
+	v = br_vlan_find(vg, *vid);
+	if (v && br_vlan_should_use(v))
 		return true;
 drop:
 	kfree_skb(skb);
 	return false;
 }
 
+bool br_allowed_ingress(const struct net_bridge *br,
+			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
+			u16 *vid)
+{
+	/* If VLAN filtering is disabled on the bridge, all packets are
+	 * permitted.
+	 */
+	if (!br->vlan_enabled) {
+		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
+		return true;
+	}
+
+	return __allowed_ingress(vg, br->vlan_proto, skb, vid);
+}
+
 /* Called under RCU. */
-bool br_allowed_egress(struct net_bridge *br,
-		       const struct net_port_vlans *v,
+bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 		       const struct sk_buff *skb)
 {
+	const struct net_bridge_vlan *v;
 	u16 vid;
 
 	/* If this packet was not filtered at input, let it pass */
 	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 		return true;
 
-	if (!v)
-		return false;
-
 	br_vlan_get_tag(skb, &vid);
-	if (test_bit(vid, v->vlan_bitmap))
+	v = br_vlan_find(vg, vid);
+	if (v && br_vlan_should_use(v))
 		return true;
 
 	return false;
@@ -340,29 +460,29 @@
 /* Called under RCU */
 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
 {
+	struct net_bridge_vlan_group *vg;
 	struct net_bridge *br = p->br;
-	struct net_port_vlans *v;
 
 	/* If filtering was disabled at input, let it pass. */
 	if (!br->vlan_enabled)
 		return true;
 
-	v = rcu_dereference(p->vlan_info);
-	if (!v)
+	vg = p->vlgrp;
+	if (!vg || !vg->num_vlans)
 		return false;
 
 	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
 		*vid = 0;
 
 	if (!*vid) {
-		*vid = br_get_pvid(v);
+		*vid = br_get_pvid(vg);
 		if (!*vid)
 			return false;
 
 		return true;
 	}
 
-	if (test_bit(*vid, v->vlan_bitmap))
+	if (br_vlan_find(vg, *vid))
 		return true;
 
 	return false;
@@ -373,31 +493,47 @@
  */
 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
 {
-	struct net_port_vlans *pv = NULL;
-	int err;
+	struct net_bridge_vlan *vlan;
+	int ret;
 
 	ASSERT_RTNL();
 
-	pv = rtnl_dereference(br->vlan_info);
-	if (pv)
-		return __vlan_add(pv, vid, flags);
+	vlan = br_vlan_find(br->vlgrp, vid);
+	if (vlan) {
+		if (!br_vlan_is_brentry(vlan)) {
+			/* Trying to change flags of non-existent bridge vlan */
+			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
+				return -EINVAL;
+			/* It was only kept for port vlans, now make it real */
+			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
+					    vlan->vid);
+			if (ret) {
+				br_err(br, "failed insert local address into bridge forwarding table\n");
+				return ret;
+			}
+			atomic_inc(&vlan->refcnt);
+			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
+			br->vlgrp->num_vlans++;
+		}
+		__vlan_add_flags(vlan, flags);
+		return 0;
+	}
 
-	/* Create port vlan infomration
-	 */
-	pv = kzalloc(sizeof(*pv), GFP_KERNEL);
-	if (!pv)
+	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+	if (!vlan)
 		return -ENOMEM;
 
-	pv->parent.br = br;
-	err = __vlan_add(pv, vid, flags);
-	if (err)
-		goto out;
+	vlan->vid = vid;
+	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
+	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
+	vlan->br = br;
+	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
+		atomic_set(&vlan->refcnt, 1);
+	ret = __vlan_add(vlan, flags);
+	if (ret)
+		kfree(vlan);
 
-	rcu_assign_pointer(br->vlan_info, pv);
-	return 0;
-out:
-	kfree(pv);
-	return err;
+	return ret;
 }
 
 /* Must be protected by RTNL.
@@ -405,49 +541,33 @@
  */
 int br_vlan_delete(struct net_bridge *br, u16 vid)
 {
-	struct net_port_vlans *pv;
+	struct net_bridge_vlan *v;
 
 	ASSERT_RTNL();
 
-	pv = rtnl_dereference(br->vlan_info);
-	if (!pv)
-		return -EINVAL;
+	v = br_vlan_find(br->vlgrp, vid);
+	if (!v || !br_vlan_is_brentry(v))
+		return -ENOENT;
 
 	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
+	br_fdb_delete_by_port(br, NULL, vid, 0);
 
-	__vlan_del(pv, vid);
-	return 0;
+	return __vlan_del(v);
 }
 
 void br_vlan_flush(struct net_bridge *br)
 {
-	struct net_port_vlans *pv;
-
 	ASSERT_RTNL();
-	pv = rtnl_dereference(br->vlan_info);
-	if (!pv)
-		return;
 
-	__vlan_flush(pv);
+	__vlan_flush(br_vlan_group(br));
 }
 
-bool br_vlan_find(struct net_bridge *br, u16 vid)
+struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
 {
-	struct net_port_vlans *pv;
-	bool found = false;
+	if (!vg)
+		return NULL;
 
-	rcu_read_lock();
-	pv = rcu_dereference(br->vlan_info);
-
-	if (!pv)
-		goto out;
-
-	if (test_bit(vid, pv->vlan_bitmap))
-		found = true;
-
-out:
-	rcu_read_unlock();
-	return found;
+	return br_vlan_lookup(&vg->vlan_hash, vid);
 }
 
 /* Must be protected by RTNL. */
@@ -505,21 +625,16 @@
 {
 	int err = 0;
 	struct net_bridge_port *p;
-	struct net_port_vlans *pv;
+	struct net_bridge_vlan *vlan;
 	__be16 oldproto;
-	u16 vid, errvid;
 
 	if (br->vlan_proto == proto)
 		return 0;
 
 	/* Add VLANs for the new proto to the device filter. */
 	list_for_each_entry(p, &br->port_list, list) {
-		pv = rtnl_dereference(p->vlan_info);
-		if (!pv)
-			continue;
-
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-			err = vlan_vid_add(p->dev, proto, vid);
+		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist) {
+			err = vlan_vid_add(p->dev, proto, vlan->vid);
 			if (err)
 				goto err_filt;
 		}
@@ -532,30 +647,19 @@
 	br_recalculate_fwd_mask(br);
 
 	/* Delete VLANs for the old proto from the device filter. */
-	list_for_each_entry(p, &br->port_list, list) {
-		pv = rtnl_dereference(p->vlan_info);
-		if (!pv)
-			continue;
-
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
-			vlan_vid_del(p->dev, oldproto, vid);
-	}
+	list_for_each_entry(p, &br->port_list, list)
+		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
+			vlan_vid_del(p->dev, oldproto, vlan->vid);
 
 	return 0;
 
 err_filt:
-	errvid = vid;
-	for_each_set_bit(vid, pv->vlan_bitmap, errvid)
-		vlan_vid_del(p->dev, proto, vid);
+	list_for_each_entry_continue_reverse(vlan, &p->vlgrp->vlan_list, vlist)
+		vlan_vid_del(p->dev, proto, vlan->vid);
 
-	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
-		pv = rtnl_dereference(p->vlan_info);
-		if (!pv)
-			continue;
-
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
-			vlan_vid_del(p->dev, proto, vid);
-	}
+	list_for_each_entry_continue_reverse(p, &br->port_list, list)
+		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
+			vlan_vid_del(p->dev, proto, vlan->vid);
 
 	return err;
 }
@@ -576,9 +680,19 @@
 	return err;
 }
 
-static bool vlan_default_pvid(struct net_port_vlans *pv, u16 vid)
+static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 {
-	return pv && vid == pv->pvid && test_bit(vid, pv->untagged_bitmap);
+	struct net_bridge_vlan *v;
+
+	if (vid != vg->pvid)
+		return false;
+
+	v = br_vlan_lookup(&vg->vlan_hash, vid);
+	if (v && br_vlan_should_use(v) &&
+	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
+		return true;
+
+	return false;
 }
 
 static void br_vlan_disable_default_pvid(struct net_bridge *br)
@@ -589,24 +703,30 @@
 	/* Disable default_pvid on all ports where it is still
 	 * configured.
 	 */
-	if (vlan_default_pvid(br_get_vlan_info(br), pvid))
+	if (vlan_default_pvid(br->vlgrp, pvid))
 		br_vlan_delete(br, pvid);
 
 	list_for_each_entry(p, &br->port_list, list) {
-		if (vlan_default_pvid(nbp_get_vlan_info(p), pvid))
+		if (vlan_default_pvid(p->vlgrp, pvid))
 			nbp_vlan_delete(p, pvid);
 	}
 
 	br->default_pvid = 0;
 }
 
-static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
+int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
 {
+	const struct net_bridge_vlan *pvent;
 	struct net_bridge_port *p;
 	u16 old_pvid;
 	int err = 0;
 	unsigned long *changed;
 
+	if (!pvid) {
+		br_vlan_disable_default_pvid(br);
+		return 0;
+	}
+
 	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
 			  GFP_KERNEL);
 	if (!changed)
@@ -617,11 +737,13 @@
 	/* Update default_pvid config only if we do not conflict with
 	 * user configuration.
 	 */
-	if ((!old_pvid || vlan_default_pvid(br_get_vlan_info(br), old_pvid)) &&
-	    !br_vlan_find(br, pvid)) {
+	pvent = br_vlan_find(br->vlgrp, pvid);
+	if ((!old_pvid || vlan_default_pvid(br->vlgrp, old_pvid)) &&
+	    (!pvent || !br_vlan_should_use(pvent))) {
 		err = br_vlan_add(br, pvid,
 				  BRIDGE_VLAN_INFO_PVID |
-				  BRIDGE_VLAN_INFO_UNTAGGED);
+				  BRIDGE_VLAN_INFO_UNTAGGED |
+				  BRIDGE_VLAN_INFO_BRENTRY);
 		if (err)
 			goto out;
 		br_vlan_delete(br, old_pvid);
@@ -633,8 +755,8 @@
 		 * user configuration.
 		 */
 		if ((old_pvid &&
-		     !vlan_default_pvid(nbp_get_vlan_info(p), old_pvid)) ||
-		    nbp_vlan_find(p, pvid))
+		     !vlan_default_pvid(p->vlgrp, old_pvid)) ||
+		    br_vlan_find(p->vlgrp, pvid))
 			continue;
 
 		err = nbp_vlan_add(p, pvid,
@@ -668,7 +790,8 @@
 		if (old_pvid)
 			br_vlan_add(br, old_pvid,
 				    BRIDGE_VLAN_INFO_PVID |
-				    BRIDGE_VLAN_INFO_UNTAGGED);
+				    BRIDGE_VLAN_INFO_UNTAGGED |
+				    BRIDGE_VLAN_INFO_BRENTRY);
 		br_vlan_delete(br, pvid);
 	}
 	goto out;
@@ -694,12 +817,7 @@
 		err = -EPERM;
 		goto unlock;
 	}
-
-	if (!pvid)
-		br_vlan_disable_default_pvid(br);
-	else
-		err = __br_vlan_set_default_pvid(br, pvid);
-
+	err = __br_vlan_set_default_pvid(br, pvid);
 unlock:
 	rtnl_unlock();
 	return err;
@@ -707,10 +825,66 @@
 
 int br_vlan_init(struct net_bridge *br)
 {
+	int ret = -ENOMEM;
+
+	br->vlgrp = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
+	if (!br->vlgrp)
+		goto out;
+	ret = rhashtable_init(&br->vlgrp->vlan_hash, &br_vlan_rht_params);
+	if (ret)
+		goto err_rhtbl;
+	INIT_LIST_HEAD(&br->vlgrp->vlan_list);
 	br->vlan_proto = htons(ETH_P_8021Q);
 	br->default_pvid = 1;
-	return br_vlan_add(br, 1,
-			   BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED);
+	ret = br_vlan_add(br, 1,
+			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
+			  BRIDGE_VLAN_INFO_BRENTRY);
+	if (ret)
+		goto err_vlan_add;
+
+out:
+	return ret;
+
+err_vlan_add:
+	rhashtable_destroy(&br->vlgrp->vlan_hash);
+err_rhtbl:
+	kfree(br->vlgrp);
+
+	goto out;
+}
+
+int nbp_vlan_init(struct net_bridge_port *p)
+{
+	struct net_bridge_vlan_group *vg;
+	int ret = -ENOMEM;
+
+	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
+	if (!vg)
+		goto out;
+
+	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
+	if (ret)
+		goto err_rhtbl;
+	INIT_LIST_HEAD(&vg->vlan_list);
+	/* Make sure everything's committed before publishing vg */
+	smp_wmb();
+	p->vlgrp = vg;
+	if (p->br->default_pvid) {
+		ret = nbp_vlan_add(p, p->br->default_pvid,
+				   BRIDGE_VLAN_INFO_PVID |
+				   BRIDGE_VLAN_INFO_UNTAGGED);
+		if (ret)
+			goto err_vlan_add;
+	}
+out:
+	return ret;
+
+err_vlan_add:
+	rhashtable_destroy(&vg->vlan_hash);
+err_rhtbl:
+	kfree(vg);
+
+	goto out;
 }
 
 /* Must be protected by RTNL.
@@ -718,35 +892,28 @@
  */
 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
 {
-	struct net_port_vlans *pv = NULL;
-	int err;
+	struct net_bridge_vlan *vlan;
+	int ret;
 
 	ASSERT_RTNL();
 
-	pv = rtnl_dereference(port->vlan_info);
-	if (pv)
-		return __vlan_add(pv, vid, flags);
-
-	/* Create port vlan infomration
-	 */
-	pv = kzalloc(sizeof(*pv), GFP_KERNEL);
-	if (!pv) {
-		err = -ENOMEM;
-		goto clean_up;
+	vlan = br_vlan_find(port->vlgrp, vid);
+	if (vlan) {
+		__vlan_add_flags(vlan, flags);
+		return 0;
 	}
 
-	pv->port_idx = port->port_no;
-	pv->parent.port = port;
-	err = __vlan_add(pv, vid, flags);
-	if (err)
-		goto clean_up;
+	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+	if (!vlan)
+		return -ENOMEM;
 
-	rcu_assign_pointer(port->vlan_info, pv);
-	return 0;
+	vlan->vid = vid;
+	vlan->port = port;
+	ret = __vlan_add(vlan, flags);
+	if (ret)
+		kfree(vlan);
 
-clean_up:
-	kfree(pv);
-	return err;
+	return ret;
 }
 
 /* Must be protected by RTNL.
@@ -754,61 +921,27 @@
  */
 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
 {
-	struct net_port_vlans *pv;
+	struct net_bridge_vlan *v;
 
 	ASSERT_RTNL();
 
-	pv = rtnl_dereference(port->vlan_info);
-	if (!pv)
-		return -EINVAL;
-
+	v = br_vlan_find(port->vlgrp, vid);
+	if (!v)
+		return -ENOENT;
 	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
 	br_fdb_delete_by_port(port->br, port, vid, 0);
 
-	return __vlan_del(pv, vid);
+	return __vlan_del(v);
 }
 
 void nbp_vlan_flush(struct net_bridge_port *port)
 {
-	struct net_port_vlans *pv;
-	u16 vid;
+	struct net_bridge_vlan *vlan;
 
 	ASSERT_RTNL();
 
-	pv = rtnl_dereference(port->vlan_info);
-	if (!pv)
-		return;
+	list_for_each_entry(vlan, &port->vlgrp->vlan_list, vlist)
+		vlan_vid_del(port->dev, port->br->vlan_proto, vlan->vid);
 
-	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
-		vlan_vid_del(port->dev, port->br->vlan_proto, vid);
-
-	__vlan_flush(pv);
-}
-
-bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
-{
-	struct net_port_vlans *pv;
-	bool found = false;
-
-	rcu_read_lock();
-	pv = rcu_dereference(port->vlan_info);
-
-	if (!pv)
-		goto out;
-
-	if (test_bit(vid, pv->vlan_bitmap))
-		found = true;
-
-out:
-	rcu_read_unlock();
-	return found;
-}
-
-int nbp_vlan_init(struct net_bridge_port *p)
-{
-	return p->br->default_pvid ?
-			nbp_vlan_add(p, p->br->default_pvid,
-				     BRIDGE_VLAN_INFO_PVID |
-				     BRIDGE_VLAN_INFO_UNTAGGED) :
-			0;
+	__vlan_flush(nbp_vlan_group(port));
 }
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 17f2e4b..0ad639a 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -180,7 +180,7 @@
 {
 	const struct ebt_log_info *info = par->targinfo;
 	struct nf_loginfo li;
-	struct net *net = dev_net(par->in ? par->in : par->out);
+	struct net *net = par->net;
 
 	li.type = NF_LOG_TYPE_LOG;
 	li.u.log.level = info->loglevel;
diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c
index 59ac795..5481615 100644
--- a/net/bridge/netfilter/ebt_nflog.c
+++ b/net/bridge/netfilter/ebt_nflog.c
@@ -24,7 +24,7 @@
 {
 	const struct ebt_nflog_info *info = par->targinfo;
 	struct nf_loginfo li;
-	struct net *net = dev_net(par->in ? par->in : par->out);
+	struct net *net = par->net;
 
 	li.type = NF_LOG_TYPE_ULOG;
 	li.u.ulog.copy_len = info->len;
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index d2cdf5d6..ec94c6f 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -50,10 +50,14 @@
 
 static int ebt_broute(struct sk_buff *skb)
 {
+	struct nf_hook_state state;
 	int ret;
 
-	ret = ebt_do_table(NF_BR_BROUTING, skb, skb->dev, NULL,
-			   dev_net(skb->dev)->xt.broute_table);
+	nf_hook_state_init(&state, NULL, NF_BR_BROUTING, INT_MIN,
+			   NFPROTO_BRIDGE, skb->dev, NULL, NULL,
+			   dev_net(skb->dev), NULL);
+
+	ret = ebt_do_table(skb, &state, state.net->xt.broute_table);
 	if (ret == NF_DROP)
 		return 1; /* route it */
 	return 0; /* bridge it */
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index ab20d6e..f9242df 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -57,19 +57,17 @@
 };
 
 static unsigned int
-ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ebt_in_hook(void *priv, struct sk_buff *skb,
 	    const struct nf_hook_state *state)
 {
-	return ebt_do_table(ops->hooknum, skb, state->in, state->out,
-			    state->net->xt.frame_filter);
+	return ebt_do_table(skb, state, state->net->xt.frame_filter);
 }
 
 static unsigned int
-ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ebt_out_hook(void *priv, struct sk_buff *skb,
 	     const struct nf_hook_state *state)
 {
-	return ebt_do_table(ops->hooknum, skb, state->in, state->out,
-			    state->net->xt.frame_filter);
+	return ebt_do_table(skb, state, state->net->xt.frame_filter);
 }
 
 static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index ad81a5a..4bbefe0 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -57,19 +57,17 @@
 };
 
 static unsigned int
-ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ebt_nat_in(void *priv, struct sk_buff *skb,
 	   const struct nf_hook_state *state)
 {
-	return ebt_do_table(ops->hooknum, skb, state->in, state->out,
-			    state->net->xt.frame_nat);
+	return ebt_do_table(skb, state, state->net->xt.frame_nat);
 }
 
 static unsigned int
-ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ebt_nat_out(void *priv, struct sk_buff *skb,
 	    const struct nf_hook_state *state)
 {
-	return ebt_do_table(ops->hooknum, skb, state->in, state->out,
-			    state->net->xt.frame_nat);
+	return ebt_do_table(skb, state, state->net->xt.frame_nat);
 }
 
 static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 48b6b01..f46ca41 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -183,10 +183,11 @@
 }
 
 /* Do some firewalling */
-unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
-   const struct net_device *in, const struct net_device *out,
-   struct ebt_table *table)
+unsigned int ebt_do_table(struct sk_buff *skb,
+			  const struct nf_hook_state *state,
+			  struct ebt_table *table)
 {
+	unsigned int hook = state->hook;
 	int i, nentries;
 	struct ebt_entry *point;
 	struct ebt_counter *counter_base, *cb_base;
@@ -199,8 +200,9 @@
 	struct xt_action_param acpar;
 
 	acpar.family  = NFPROTO_BRIDGE;
-	acpar.in      = in;
-	acpar.out     = out;
+	acpar.net     = state->net;
+	acpar.in      = state->in;
+	acpar.out     = state->out;
 	acpar.hotdrop = false;
 	acpar.hooknum = hook;
 
@@ -220,7 +222,7 @@
 	base = private->entries;
 	i = 0;
 	while (i < nentries) {
-		if (ebt_basic_match(point, skb, in, out))
+		if (ebt_basic_match(point, skb, state->in, state->out))
 			goto letscontinue;
 
 		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index a343e62..62f6b1b 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -65,31 +65,29 @@
 EXPORT_SYMBOL_GPL(nft_bridge_ip6hdr_validate);
 
 static inline void nft_bridge_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
-					       const struct nf_hook_ops *ops,
 					       struct sk_buff *skb,
 					       const struct nf_hook_state *state)
 {
 	if (nft_bridge_iphdr_validate(skb))
-		nft_set_pktinfo_ipv4(pkt, ops, skb, state);
+		nft_set_pktinfo_ipv4(pkt, skb, state);
 	else
-		nft_set_pktinfo(pkt, ops, skb, state);
+		nft_set_pktinfo(pkt, skb, state);
 }
 
 static inline void nft_bridge_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
-					       const struct nf_hook_ops *ops,
 					       struct sk_buff *skb,
 					       const struct nf_hook_state *state)
 {
 #if IS_ENABLED(CONFIG_IPV6)
 	if (nft_bridge_ip6hdr_validate(skb) &&
-	    nft_set_pktinfo_ipv6(pkt, ops, skb, state) == 0)
+	    nft_set_pktinfo_ipv6(pkt, skb, state) == 0)
 		return;
 #endif
-	nft_set_pktinfo(pkt, ops, skb, state);
+	nft_set_pktinfo(pkt, skb, state);
 }
 
 static unsigned int
-nft_do_chain_bridge(const struct nf_hook_ops *ops,
+nft_do_chain_bridge(void *priv,
 		    struct sk_buff *skb,
 		    const struct nf_hook_state *state)
 {
@@ -97,17 +95,17 @@
 
 	switch (eth_hdr(skb)->h_proto) {
 	case htons(ETH_P_IP):
-		nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, state);
+		nft_bridge_set_pktinfo_ipv4(&pkt, skb, state);
 		break;
 	case htons(ETH_P_IPV6):
-		nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, state);
+		nft_bridge_set_pktinfo_ipv6(&pkt, skb, state);
 		break;
 	default:
-		nft_set_pktinfo(&pkt, ops, skb, state);
+		nft_set_pktinfo(&pkt, skb, state);
 		break;
 	}
 
-	return nft_do_chain(&pkt, ops);
+	return nft_do_chain(&pkt, priv);
 }
 
 static struct nft_af_info nft_af_bridge __read_mostly = {
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 858d848..fdba3d9 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -261,7 +261,6 @@
 				   const struct nft_pktinfo *pkt)
 {
 	struct nft_reject *priv = nft_expr_priv(expr);
-	struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
 	const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
 
 	if (is_broadcast_ether_addr(dest) ||
@@ -273,16 +272,16 @@
 		switch (priv->type) {
 		case NFT_REJECT_ICMP_UNREACH:
 			nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
-						      pkt->ops->hooknum,
+						      pkt->hook,
 						      priv->icmp_code);
 			break;
 		case NFT_REJECT_TCP_RST:
 			nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in,
-							pkt->ops->hooknum);
+							pkt->hook);
 			break;
 		case NFT_REJECT_ICMPX_UNREACH:
 			nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
-						      pkt->ops->hooknum,
+						      pkt->hook,
 						      nft_reject_icmp_code(priv->icmp_code));
 			break;
 		}
@@ -290,17 +289,17 @@
 	case htons(ETH_P_IPV6):
 		switch (priv->type) {
 		case NFT_REJECT_ICMP_UNREACH:
-			nft_reject_br_send_v6_unreach(net, pkt->skb, pkt->in,
-						      pkt->ops->hooknum,
+			nft_reject_br_send_v6_unreach(pkt->net, pkt->skb,
+						      pkt->in, pkt->hook,
 						      priv->icmp_code);
 			break;
 		case NFT_REJECT_TCP_RST:
-			nft_reject_br_send_v6_tcp_reset(net, pkt->skb, pkt->in,
-							pkt->ops->hooknum);
+			nft_reject_br_send_v6_tcp_reset(pkt->net, pkt->skb,
+							pkt->in, pkt->hook);
 			break;
 		case NFT_REJECT_ICMPX_UNREACH:
-			nft_reject_br_send_v6_unreach(net, pkt->skb, pkt->in,
-						      pkt->ops->hooknum,
+			nft_reject_br_send_v6_unreach(pkt->net, pkt->skb,
+						      pkt->in, pkt->hook,
 						      nft_reject_icmpv6_code(priv->icmp_code));
 			break;
 		}
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 69a4d30..54a00d6 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -357,6 +357,7 @@
 	opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
 	opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT;
 	opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;
+	opt->monc_ping_timeout = CEPH_MONC_PING_TIMEOUT_DEFAULT;
 
 	/* get mon ip(s) */
 	/* ip1[:port1][,ip2[:port2]...] */
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 790fe89..4440edc 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -79,10 +79,6 @@
 	return 0;
 }
 
-
-
-#define AES_KEY_SIZE 16
-
 static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
 {
 	return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index e3be1d2..b9b0e3b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -163,6 +163,7 @@
 static char tag_msg = CEPH_MSGR_TAG_MSG;
 static char tag_ack = CEPH_MSGR_TAG_ACK;
 static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
+static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;
 
 #ifdef CONFIG_LOCKDEP
 static struct lock_class_key socket_class;
@@ -176,7 +177,7 @@
 
 static void queue_con(struct ceph_connection *con);
 static void cancel_con(struct ceph_connection *con);
-static void con_work(struct work_struct *);
+static void ceph_con_workfn(struct work_struct *);
 static void con_fault(struct ceph_connection *con);
 
 /*
@@ -276,22 +277,22 @@
 		ceph_msgr_wq = NULL;
 	}
 
-	ceph_msgr_slab_exit();
-
 	BUG_ON(zero_page == NULL);
 	page_cache_release(zero_page);
 	zero_page = NULL;
+
+	ceph_msgr_slab_exit();
 }
 
 int ceph_msgr_init(void)
 {
+	if (ceph_msgr_slab_init())
+		return -ENOMEM;
+
 	BUG_ON(zero_page != NULL);
 	zero_page = ZERO_PAGE(0);
 	page_cache_get(zero_page);
 
-	if (ceph_msgr_slab_init())
-		return -ENOMEM;
-
 	/*
 	 * The number of active work items is limited by the number of
 	 * connections, so leave @max_active at default.
@@ -749,7 +750,7 @@
 	mutex_init(&con->mutex);
 	INIT_LIST_HEAD(&con->out_queue);
 	INIT_LIST_HEAD(&con->out_sent);
-	INIT_DELAYED_WORK(&con->work, con_work);
+	INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
 
 	con->state = CON_STATE_CLOSED;
 }
@@ -1351,7 +1352,16 @@
 {
 	dout("prepare_write_keepalive %p\n", con);
 	con_out_kvec_reset(con);
-	con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
+	if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
+		struct timespec now = CURRENT_TIME;
+
+		con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
+		ceph_encode_timespec(&con->out_temp_keepalive2, &now);
+		con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
+				 &con->out_temp_keepalive2);
+	} else {
+		con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive);
+	}
 	con_flag_set(con, CON_FLAG_WRITE_PENDING);
 }
 
@@ -1625,6 +1635,12 @@
 	con->in_tag = CEPH_MSGR_TAG_READY;
 }
 
+static void prepare_read_keepalive_ack(struct ceph_connection *con)
+{
+	dout("prepare_read_keepalive_ack %p\n", con);
+	con->in_base_pos = 0;
+}
+
 /*
  * Prepare to read a message.
  */
@@ -2322,13 +2338,6 @@
 			return ret;
 
 		BUG_ON(!con->in_msg ^ skip);
-		if (con->in_msg && data_len > con->in_msg->data_length) {
-			pr_warn("%s skipping long message (%u > %zd)\n",
-				__func__, data_len, con->in_msg->data_length);
-			ceph_msg_put(con->in_msg);
-			con->in_msg = NULL;
-			skip = 1;
-		}
 		if (skip) {
 			/* skip this message */
 			dout("alloc_msg said skip message\n");
@@ -2457,6 +2466,17 @@
 	mutex_lock(&con->mutex);
 }
 
+static int read_keepalive_ack(struct ceph_connection *con)
+{
+	struct ceph_timespec ceph_ts;
+	size_t size = sizeof(ceph_ts);
+	int ret = read_partial(con, size, size, &ceph_ts);
+	if (ret <= 0)
+		return ret;
+	ceph_decode_timespec(&con->last_keepalive_ack, &ceph_ts);
+	prepare_read_tag(con);
+	return 1;
+}
 
 /*
  * Write something to the socket.  Called in a worker thread when the
@@ -2526,6 +2546,10 @@
 
 do_next:
 	if (con->state == CON_STATE_OPEN) {
+		if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
+			prepare_write_keepalive(con);
+			goto more;
+		}
 		/* is anything else pending? */
 		if (!list_empty(&con->out_queue)) {
 			prepare_write_message(con);
@@ -2535,10 +2559,6 @@
 			prepare_write_ack(con);
 			goto more;
 		}
-		if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
-			prepare_write_keepalive(con);
-			goto more;
-		}
 	}
 
 	/* Nothing to do! */
@@ -2641,6 +2661,9 @@
 		case CEPH_MSGR_TAG_ACK:
 			prepare_read_ack(con);
 			break;
+		case CEPH_MSGR_TAG_KEEPALIVE2_ACK:
+			prepare_read_keepalive_ack(con);
+			break;
 		case CEPH_MSGR_TAG_CLOSE:
 			con_close_socket(con);
 			con->state = CON_STATE_CLOSED;
@@ -2684,6 +2707,12 @@
 		process_ack(con);
 		goto more;
 	}
+	if (con->in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
+		ret = read_keepalive_ack(con);
+		if (ret <= 0)
+			goto out;
+		goto more;
+	}
 
 out:
 	dout("try_read done on %p ret %d\n", con, ret);
@@ -2799,7 +2828,7 @@
 /*
  * Do some work on a connection.  Drop a connection ref when we're done.
  */
-static void con_work(struct work_struct *work)
+static void ceph_con_workfn(struct work_struct *work)
 {
 	struct ceph_connection *con = container_of(work, struct ceph_connection,
 						   work.work);
@@ -3101,6 +3130,20 @@
 }
 EXPORT_SYMBOL(ceph_con_keepalive);
 
+bool ceph_con_keepalive_expired(struct ceph_connection *con,
+			       unsigned long interval)
+{
+	if (interval > 0 &&
+	    (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
+		struct timespec now = CURRENT_TIME;
+		struct timespec ts;
+		jiffies_to_timespec(interval, &ts);
+		ts = timespec_add(con->last_keepalive_ack, ts);
+		return timespec_compare(&now, &ts) >= 0;
+	}
+	return false;
+}
+
 static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
 {
 	struct ceph_msg_data *data;
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 9d6ff12..edda016 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -149,6 +149,10 @@
 			      CEPH_ENTITY_TYPE_MON, monc->cur_mon,
 			      &monc->monmap->mon_inst[monc->cur_mon].addr);
 
+		/* send an initial keepalive to ensure our timestamp is
+		 * valid by the time we are in an OPENED state */
+		ceph_con_keepalive(&monc->con);
+
 		/* initiatiate authentication handshake */
 		ret = ceph_auth_build_hello(monc->auth,
 					    monc->m_auth->front.iov_base,
@@ -170,14 +174,19 @@
  */
 static void __schedule_delayed(struct ceph_mon_client *monc)
 {
-	unsigned int delay;
+	struct ceph_options *opt = monc->client->options;
+	unsigned long delay;
 
-	if (monc->cur_mon < 0 || __sub_expired(monc))
+	if (monc->cur_mon < 0 || __sub_expired(monc)) {
 		delay = 10 * HZ;
-	else
+	} else {
 		delay = 20 * HZ;
-	dout("__schedule_delayed after %u\n", delay);
-	schedule_delayed_work(&monc->delayed_work, delay);
+		if (opt->monc_ping_timeout > 0)
+			delay = min(delay, opt->monc_ping_timeout / 3);
+	}
+	dout("__schedule_delayed after %lu\n", delay);
+	schedule_delayed_work(&monc->delayed_work,
+			      round_jiffies_relative(delay));
 }
 
 /*
@@ -743,11 +752,23 @@
 		__close_session(monc);
 		__open_session(monc);  /* continue hunting */
 	} else {
-		ceph_con_keepalive(&monc->con);
+		struct ceph_options *opt = monc->client->options;
+		int is_auth = ceph_auth_is_authenticated(monc->auth);
+		if (ceph_con_keepalive_expired(&monc->con,
+					       opt->monc_ping_timeout)) {
+			dout("monc keepalive timeout\n");
+			is_auth = 0;
+			__close_session(monc);
+			monc->hunting = true;
+			__open_session(monc);
+		}
 
-		__validate_auth(monc);
+		if (!monc->hunting) {
+			ceph_con_keepalive(&monc->con);
+			__validate_auth(monc);
+		}
 
-		if (ceph_auth_is_authenticated(monc->auth))
+		if (is_auth)
 			__send_subscribe(monc);
 	}
 	__schedule_delayed(monc);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 5003367..80b94e3 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2817,8 +2817,9 @@
 }
 
 /*
- * lookup and return message for incoming reply.  set up reply message
- * pages.
+ * Lookup and return message for incoming reply.  Don't try to do
+ * anything about a data portion larger than the preallocated one at
+ * the moment - for now, just skip the message.
  */
 static struct ceph_msg *get_reply(struct ceph_connection *con,
 				  struct ceph_msg_header *hdr,
@@ -2836,10 +2837,10 @@
 	mutex_lock(&osdc->request_mutex);
 	req = __lookup_request(osdc, tid);
 	if (!req) {
-		*skip = 1;
+		pr_warn("%s osd%d tid %llu unknown, skipping\n",
+			__func__, osd->o_osd, tid);
 		m = NULL;
-		dout("get_reply unknown tid %llu from osd%d\n", tid,
-		     osd->o_osd);
+		*skip = 1;
 		goto out;
 	}
 
@@ -2849,10 +2850,9 @@
 	ceph_msg_revoke_incoming(req->r_reply);
 
 	if (front_len > req->r_reply->front_alloc_len) {
-		pr_warn("get_reply front %d > preallocated %d (%u#%llu)\n",
-			front_len, req->r_reply->front_alloc_len,
-			(unsigned int)con->peer_name.type,
-			le64_to_cpu(con->peer_name.num));
+		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
+			__func__, osd->o_osd, req->r_tid, front_len,
+			req->r_reply->front_alloc_len);
 		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
 				 false);
 		if (!m)
@@ -2860,37 +2860,22 @@
 		ceph_msg_put(req->r_reply);
 		req->r_reply = m;
 	}
-	m = ceph_msg_get(req->r_reply);
 
-	if (data_len > 0) {
-		struct ceph_osd_data *osd_data;
-
-		/*
-		 * XXX This is assuming there is only one op containing
-		 * XXX page data.  Probably OK for reads, but this
-		 * XXX ought to be done more generally.
-		 */
-		osd_data = osd_req_op_extent_osd_data(req, 0);
-		if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
-			if (osd_data->pages &&
-				unlikely(osd_data->length < data_len)) {
-
-				pr_warn("tid %lld reply has %d bytes we had only %llu bytes ready\n",
-					tid, data_len, osd_data->length);
-				*skip = 1;
-				ceph_msg_put(m);
-				m = NULL;
-				goto out;
-			}
-		}
+	if (data_len > req->r_reply->data_length) {
+		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
+			__func__, osd->o_osd, req->r_tid, data_len,
+			req->r_reply->data_length);
+		m = NULL;
+		*skip = 1;
+		goto out;
 	}
-	*skip = 0;
+
+	m = ceph_msg_get(req->r_reply);
 	dout("get_reply tid %lld %p\n", tid, m);
 
 out:
 	mutex_unlock(&osdc->request_mutex);
 	return m;
-
 }
 
 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 4a31258..7d8f581 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1300,7 +1300,7 @@
 		ceph_decode_addr(&addr);
 		pr_info("osd%d up\n", osd);
 		BUG_ON(osd >= map->max_osd);
-		map->osd_state[osd] |= CEPH_OSD_UP;
+		map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS;
 		map->osd_addr[osd] = addr;
 	}
 
diff --git a/net/core/dev.c b/net/core/dev.c
index ee0d628..a229bf0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2974,6 +2974,7 @@
 			new_index = skb_tx_hash(dev, skb);
 
 		if (queue_index != new_index && sk &&
+		    sk_fullsock(sk) &&
 		    rcu_access_pointer(sk->sk_dst_cache))
 			sk_tx_queue_set(sk, new_index);
 
@@ -4723,6 +4724,8 @@
 
 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
 		msleep(1);
+	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
+		msleep(1);
 
 	hrtimer_cancel(&n->timer);
 
@@ -4865,8 +4868,7 @@
 	struct rcu_head rcu;
 };
 
-static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
-						 struct net_device *adj_dev,
+static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
 						 struct list_head *adj_list)
 {
 	struct netdev_adjacent *adj;
@@ -4892,7 +4894,7 @@
 {
 	ASSERT_RTNL();
 
-	return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
+	return __netdev_find_adj(upper_dev, &dev->all_adj_list.upper);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev);
 
@@ -5154,7 +5156,7 @@
 	struct netdev_adjacent *adj;
 	int ret;
 
-	adj = __netdev_find_adj(dev, adj_dev, dev_list);
+	adj = __netdev_find_adj(adj_dev, dev_list);
 
 	if (adj) {
 		adj->ref_nr++;
@@ -5210,7 +5212,7 @@
 {
 	struct netdev_adjacent *adj;
 
-	adj = __netdev_find_adj(dev, adj_dev, dev_list);
+	adj = __netdev_find_adj(adj_dev, dev_list);
 
 	if (!adj) {
 		pr_err("tried to remove device %s from %s\n",
@@ -5331,10 +5333,10 @@
 		return -EBUSY;
 
 	/* To prevent loops, check if dev is not upper device to upper_dev. */
-	if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
+	if (__netdev_find_adj(dev, &upper_dev->all_adj_list.upper))
 		return -EBUSY;
 
-	if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
+	if (__netdev_find_adj(upper_dev, &dev->adj_list.upper))
 		return -EEXIST;
 
 	if (master && netdev_master_upper_dev_get(dev))
@@ -5612,7 +5614,7 @@
 
 	if (!lower_dev)
 		return NULL;
-	lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
+	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
 	if (!lower)
 		return NULL;
 
diff --git a/net/core/dst.c b/net/core/dst.c
index 0771c8c..2a18180 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -144,12 +144,12 @@
 	mutex_unlock(&dst_gc_mutex);
 }
 
-int dst_discard_sk(struct sock *sk, struct sk_buff *skb)
+int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	kfree_skb(skb);
 	return 0;
 }
-EXPORT_SYMBOL(dst_discard_sk);
+EXPORT_SYMBOL(dst_discard_out);
 
 const u32 dst_default_metrics[RTAX_MAX + 1] = {
 	/* This initializer is needed to force linker to place this variable
@@ -177,7 +177,7 @@
 	dst->xfrm = NULL;
 #endif
 	dst->input = dst_discard;
-	dst->output = dst_discard_sk;
+	dst->output = dst_discard_out;
 	dst->error = 0;
 	dst->obsolete = initial_obsolete;
 	dst->header_len = 0;
@@ -224,7 +224,7 @@
 	 */
 	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
 		dst->input = dst_discard;
-		dst->output = dst_discard_sk;
+		dst->output = dst_discard_out;
 	}
 	dst->obsolete = DST_OBSOLETE_DEAD;
 }
@@ -352,7 +352,7 @@
 	.family =		AF_UNSPEC,
 };
 
-static int dst_md_discard_sk(struct sock *sk, struct sk_buff *skb)
+static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	WARN_ONCE(1, "Attempting to call output on metadata dst\n");
 	kfree_skb(skb);
@@ -375,7 +375,7 @@
 		 DST_METADATA | DST_NOCACHE | DST_NOCOUNT);
 
 	dst->input = dst_md_discard;
-	dst->output = dst_md_discard_sk;
+	dst->output = dst_md_discard_out;
 
 	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
 }
@@ -430,7 +430,7 @@
 
 	if (!unregister) {
 		dst->input = dst_discard;
-		dst->output = dst_discard_sk;
+		dst->output = dst_discard_out;
 	} else {
 		dst->dev = dev_net(dst->dev)->loopback_dev;
 		dev_hold(dst->dev);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index bf77e36..365de66 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -631,15 +631,17 @@
 {
 	int idx = 0;
 	struct fib_rule *rule;
+	int err = 0;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
 		if (idx < cb->args[1])
 			goto skip;
 
-		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
-				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
-				     NLM_F_MULTI, ops) < 0)
+		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
+				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
+				       NLM_F_MULTI, ops);
+		if (err)
 			break;
 skip:
 		idx++;
@@ -648,7 +650,7 @@
 	cb->args[1] = idx;
 	rules_ops_put(ops);
 
-	return skb->len;
+	return err;
 }
 
 static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
@@ -664,7 +666,9 @@
 		if (ops == NULL)
 			return -EAFNOSUPPORT;
 
-		return dump_rules(skb, cb, ops);
+		dump_rules(skb, cb, ops);
+
+		return skb->len;
 	}
 
 	rcu_read_lock();
diff --git a/net/core/filter.c b/net/core/filter.c
index da3f3d9..0b00094 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -49,16 +49,17 @@
 #include <net/sch_generic.h>
 #include <net/cls_cgroup.h>
 #include <net/dst_metadata.h>
+#include <net/dst.h>
 
 /**
  *	sk_filter - run a packet through a socket filter
  *	@sk: sock associated with &sk_buff
  *	@skb: buffer to filter
  *
- * Run the filter code and then cut skb->data to correct size returned by
- * SK_RUN_FILTER. If pkt_len is 0 we toss packet. If skb->len is smaller
+ * Run the eBPF program and then cut skb->data to correct size returned by
+ * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
  * than pkt_len we keep whole skb->data. This is the socket level
- * wrapper to SK_RUN_FILTER. It returns 0 if the packet should
+ * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
  * be accepted or -EPERM if the packet should be tossed.
  *
  */
@@ -82,7 +83,7 @@
 	rcu_read_lock();
 	filter = rcu_dereference(sk->sk_filter);
 	if (filter) {
-		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
+		unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
 
 		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
 	}
@@ -148,12 +149,6 @@
 	return raw_smp_processor_id();
 }
 
-/* note that this only generates 32-bit random numbers */
-static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
-{
-	return prandom_u32();
-}
-
 static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
 			      struct bpf_insn *insn_buf)
 {
@@ -312,7 +307,8 @@
 			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
 			break;
 		case SKF_AD_OFF + SKF_AD_RANDOM:
-			*insn = BPF_EMIT_CALL(__get_random_u32);
+			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
+			bpf_user_rnd_init_once();
 			break;
 		}
 		break;
@@ -478,9 +474,9 @@
 				bpf_src = BPF_X;
 			} else {
 				insn->dst_reg = BPF_REG_A;
-				insn->src_reg = BPF_REG_X;
 				insn->imm = fp->k;
 				bpf_src = BPF_SRC(fp->code);
+				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
 			}
 
 			/* Common case where 'jump_false' is next insn. */
@@ -1001,7 +997,7 @@
 	int err;
 
 	fp->bpf_func = NULL;
-	fp->jited = false;
+	fp->jited = 0;
 
 	err = bpf_check_classic(fp->insns, fp->len);
 	if (err) {
@@ -1083,16 +1079,18 @@
  *	@pfp: the unattached filter that is created
  *	@fprog: the filter program
  *	@trans: post-classic verifier transformation handler
+ *	@save_orig: save classic BPF program
  *
  * This function effectively does the same as bpf_prog_create(), only
  * that it builds up its insns buffer from user space provided buffer.
  * It also allows for passing a bpf_aux_classic_check_t handler.
  */
 int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
-			      bpf_aux_classic_check_t trans)
+			      bpf_aux_classic_check_t trans, bool save_orig)
 {
 	unsigned int fsize = bpf_classic_proglen(fprog);
 	struct bpf_prog *fp;
+	int err;
 
 	/* Make sure new filter is there and in the right amounts. */
 	if (fprog->filter == NULL)
@@ -1108,12 +1106,16 @@
 	}
 
 	fp->len = fprog->len;
-	/* Since unattached filters are not copied back to user
-	 * space through sk_get_filter(), we do not need to hold
-	 * a copy here, and can spare us the work.
-	 */
 	fp->orig_prog = NULL;
 
+	if (save_orig) {
+		err = bpf_prog_store_orig_filter(fp, fprog);
+		if (err) {
+			__bpf_prog_free(fp);
+			return -ENOMEM;
+		}
+	}
+
 	/* bpf_prepare_filter() already takes care of freeing
 	 * memory in case something goes wrong.
 	 */
@@ -1404,9 +1406,6 @@
 	if (unlikely(!dev))
 		return -EINVAL;
 
-	if (unlikely(!(dev->flags & IFF_UP)))
-		return -EINVAL;
-
 	skb2 = skb_clone(skb, GFP_ATOMIC);
 	if (unlikely(!skb2))
 		return -ENOMEM;
@@ -1458,6 +1457,7 @@
 		return dev_forward_skb(dev, skb);
 
 	skb->dev = dev;
+	skb_sender_cpu_clear(skb);
 	return dev_queue_xmit(skb);
 }
 
@@ -1481,6 +1481,25 @@
 	.arg1_type      = ARG_PTR_TO_CTX,
 };
 
+static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	const struct dst_entry *dst;
+
+	dst = skb_dst((struct sk_buff *) (unsigned long) r1);
+	if (dst)
+		return dst->tclassid;
+#endif
+	return 0;
+}
+
+static const struct bpf_func_proto bpf_get_route_realm_proto = {
+	.func           = bpf_get_route_realm,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+
 static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
@@ -1621,7 +1640,8 @@
 	case BPF_FUNC_ktime_get_ns:
 		return &bpf_ktime_get_ns_proto;
 	case BPF_FUNC_trace_printk:
-		return bpf_get_trace_printk_proto();
+		if (capable(CAP_SYS_ADMIN))
+			return bpf_get_trace_printk_proto();
 	default:
 		return NULL;
 	}
@@ -1651,6 +1671,8 @@
 		return bpf_get_skb_set_tunnel_key_proto();
 	case BPF_FUNC_redirect:
 		return &bpf_redirect_proto;
+	case BPF_FUNC_get_route_realm:
+		return &bpf_get_route_realm_proto;
 	default:
 		return sk_filter_func_proto(func_id);
 	}
@@ -1702,6 +1724,7 @@
 		switch (off) {
 		case offsetof(struct __sk_buff, mark):
 		case offsetof(struct __sk_buff, tc_index):
+		case offsetof(struct __sk_buff, priority):
 		case offsetof(struct __sk_buff, cb[0]) ...
 			offsetof(struct __sk_buff, cb[4]):
 			break;
@@ -1714,7 +1737,8 @@
 
 static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 				      int src_reg, int ctx_off,
-				      struct bpf_insn *insn_buf)
+				      struct bpf_insn *insn_buf,
+				      struct bpf_prog *prog)
 {
 	struct bpf_insn *insn = insn_buf;
 
@@ -1743,8 +1767,12 @@
 	case offsetof(struct __sk_buff, priority):
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);
 
-		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
-				      offsetof(struct sk_buff, priority));
+		if (type == BPF_WRITE)
+			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+					      offsetof(struct sk_buff, priority));
+		else
+			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+					      offsetof(struct sk_buff, priority));
 		break;
 
 	case offsetof(struct __sk_buff, ingress_ifindex):
@@ -1801,6 +1829,7 @@
 		offsetof(struct __sk_buff, cb[4]):
 		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
 
+		prog->cb_access = 1;
 		ctx_off -= offsetof(struct __sk_buff, cb[0]);
 		ctx_off += offsetof(struct sk_buff, cb);
 		ctx_off += offsetof(struct qdisc_skb_cb, data);
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index dfb1a9c..299cfc2 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -180,7 +180,7 @@
 }
 EXPORT_SYMBOL(lwtunnel_cmp_encap);
 
-int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	const struct lwtunnel_encap_ops *ops;
@@ -199,7 +199,7 @@
 	rcu_read_lock();
 	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
 	if (likely(ops && ops->output))
-		ret = ops->output(sk, skb);
+		ret = ops->output(net, sk, skb);
 	rcu_read_unlock();
 
 	if (ret == -EOPNOTSUPP)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 2b515ba..1aa8437 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2235,14 +2235,53 @@
 	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
 }
 
+static bool neigh_master_filtered(struct net_device *dev, int master_idx)
+{
+	struct net_device *master;
+
+	if (!master_idx)
+		return false;
+
+	master = netdev_master_upper_dev_get(dev);
+	if (!master || master->ifindex != master_idx)
+		return true;
+
+	return false;
+}
+
+static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
+{
+	if (filter_idx && dev->ifindex != filter_idx)
+		return true;
+
+	return false;
+}
+
 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 			    struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
+	const struct nlmsghdr *nlh = cb->nlh;
+	struct nlattr *tb[NDA_MAX + 1];
 	struct neighbour *n;
 	int rc, h, s_h = cb->args[1];
 	int idx, s_idx = idx = cb->args[2];
 	struct neigh_hash_table *nht;
+	int filter_master_idx = 0, filter_idx = 0;
+	unsigned int flags = NLM_F_MULTI;
+	int err;
+
+	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
+	if (!err) {
+		if (tb[NDA_IFINDEX])
+			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
+
+		if (tb[NDA_MASTER])
+			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
+
+		if (filter_idx || filter_master_idx)
+			flags |= NLM_F_DUMP_FILTERED;
+	}
 
 	rcu_read_lock_bh();
 	nht = rcu_dereference_bh(tbl->nht);
@@ -2255,12 +2294,16 @@
 		     n = rcu_dereference_bh(n->next)) {
 			if (!net_eq(dev_net(n->dev), net))
 				continue;
+			if (neigh_ifindex_filtered(n->dev, filter_idx))
+				continue;
+			if (neigh_master_filtered(n->dev, filter_master_idx))
+				continue;
 			if (idx < s_idx)
 				goto next;
 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
 					    cb->nlh->nlmsg_seq,
 					    RTM_NEWNEIGH,
-					    NLM_F_MULTI) < 0) {
+					    flags) < 0) {
 				rc = -1;
 				goto out;
 			}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 49b5990..f88a62a 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -31,7 +31,6 @@
 static const char fmt_hex[] = "%#x\n";
 static const char fmt_long_hex[] = "%#lx\n";
 static const char fmt_dec[] = "%d\n";
-static const char fmt_udec[] = "%u\n";
 static const char fmt_ulong[] = "%lu\n";
 static const char fmt_u64[] = "%llu\n";
 
@@ -202,7 +201,7 @@
 	if (netif_running(netdev)) {
 		struct ethtool_cmd cmd;
 		if (!__ethtool_get_settings(netdev, &cmd))
-			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
+			ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
 	}
 	rtnl_unlock();
 	return ret;
@@ -472,7 +471,7 @@
 
 	if (dev_isalive(netdev)) {
 		struct switchdev_attr attr = {
-			.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+			.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 			.flags = SWITCHDEV_F_NO_RECURSE,
 		};
 
@@ -1478,6 +1477,15 @@
 	return ret == 0 ? dev->of_node == data : ret;
 }
 
+/*
+ * of_find_net_device_by_node - lookup the net device for the device node
+ * @np: OF device node
+ *
+ * Looks up the net_device structure corresponding with the device node.
+ * If successful, returns a pointer to the net_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure. The
+ * refcount must be dropped when done with the net_device.
+ */
 struct net_device *of_find_net_device_by_node(struct device_node *np)
 {
 	struct device *dev;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6aa3db8..94acfc8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -140,36 +140,42 @@
  * case. Further, we test the poll_owner to avoid recursion on UP
  * systems where the lock doesn't exist.
  */
-static int poll_one_napi(struct napi_struct *napi, int budget)
+static void poll_one_napi(struct napi_struct *napi)
 {
-	int work;
+	int work = 0;
 
 	/* net_rx_action's ->poll() invocations and our's are
 	 * synchronized by this test which is only made while
 	 * holding the napi->poll_lock.
 	 */
 	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
-		return budget;
+		return;
 
-	set_bit(NAPI_STATE_NPSVC, &napi->state);
+	/* If we set this bit but see that it has already been set,
+	 * that indicates that napi has been disabled and we need
+	 * to abort this operation
+	 */
+	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
+		return;
 
-	work = napi->poll(napi, budget);
-	WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
+	/* We explicitly pass the polling call a budget of 0 to
+	 * indicate that we are clearing the Tx path only.
+	 */
+	work = napi->poll(napi, 0);
+	WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);
 	trace_napi_poll(napi);
 
 	clear_bit(NAPI_STATE_NPSVC, &napi->state);
-
-	return budget - work;
 }
 
-static void poll_napi(struct net_device *dev, int budget)
+static void poll_napi(struct net_device *dev)
 {
 	struct napi_struct *napi;
 
 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
 		if (napi->poll_owner != smp_processor_id() &&
 		    spin_trylock(&napi->poll_lock)) {
-			budget = poll_one_napi(napi, budget);
+			poll_one_napi(napi);
 			spin_unlock(&napi->poll_lock);
 		}
 	}
@@ -179,7 +185,6 @@
 {
 	const struct net_device_ops *ops;
 	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
-	int budget = 0;
 
 	/* Don't do any rx activity if the dev_lock mutex is held
 	 * the dev_open/close paths use this to block netpoll activity
@@ -202,7 +207,7 @@
 	/* Process pending work on NIC */
 	ops->ndo_poll_controller(dev);
 
-	poll_napi(dev, budget);
+	poll_napi(dev);
 
 	up(&ni->dev_lock);
 
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index b42f0e2..5d26056 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -37,90 +37,16 @@
 int sysctl_max_syn_backlog = 256;
 EXPORT_SYMBOL(sysctl_max_syn_backlog);
 
-int reqsk_queue_alloc(struct request_sock_queue *queue,
-		      unsigned int nr_table_entries)
+void reqsk_queue_alloc(struct request_sock_queue *queue)
 {
-	size_t lopt_size = sizeof(struct listen_sock);
-	struct listen_sock *lopt = NULL;
+	spin_lock_init(&queue->rskq_lock);
 
-	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
-	nr_table_entries = max_t(u32, nr_table_entries, 8);
-	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
-	lopt_size += nr_table_entries * sizeof(struct request_sock *);
+	spin_lock_init(&queue->fastopenq.lock);
+	queue->fastopenq.rskq_rst_head = NULL;
+	queue->fastopenq.rskq_rst_tail = NULL;
+	queue->fastopenq.qlen = 0;
 
-	if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
-		lopt = kzalloc(lopt_size, GFP_KERNEL |
-					  __GFP_NOWARN |
-					  __GFP_NORETRY);
-	if (!lopt)
-		lopt = vzalloc(lopt_size);
-	if (!lopt)
-		return -ENOMEM;
-
-	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
-	spin_lock_init(&queue->syn_wait_lock);
 	queue->rskq_accept_head = NULL;
-	lopt->nr_table_entries = nr_table_entries;
-	lopt->max_qlen_log = ilog2(nr_table_entries);
-
-	spin_lock_bh(&queue->syn_wait_lock);
-	queue->listen_opt = lopt;
-	spin_unlock_bh(&queue->syn_wait_lock);
-
-	return 0;
-}
-
-void __reqsk_queue_destroy(struct request_sock_queue *queue)
-{
-	/* This is an error recovery path only, no locking needed */
-	kvfree(queue->listen_opt);
-}
-
-static inline struct listen_sock *reqsk_queue_yank_listen_sk(
-		struct request_sock_queue *queue)
-{
-	struct listen_sock *lopt;
-
-	spin_lock_bh(&queue->syn_wait_lock);
-	lopt = queue->listen_opt;
-	queue->listen_opt = NULL;
-	spin_unlock_bh(&queue->syn_wait_lock);
-
-	return lopt;
-}
-
-void reqsk_queue_destroy(struct request_sock_queue *queue)
-{
-	/* make all the listen_opt local to us */
-	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
-
-	if (listen_sock_qlen(lopt) != 0) {
-		unsigned int i;
-
-		for (i = 0; i < lopt->nr_table_entries; i++) {
-			struct request_sock *req;
-
-			spin_lock_bh(&queue->syn_wait_lock);
-			while ((req = lopt->syn_table[i]) != NULL) {
-				lopt->syn_table[i] = req->dl_next;
-				/* Because of following del_timer_sync(),
-				 * we must release the spinlock here
-				 * or risk a dead lock.
-				 */
-				spin_unlock_bh(&queue->syn_wait_lock);
-				atomic_inc(&lopt->qlen_dec);
-				if (del_timer_sync(&req->rsk_timer))
-					reqsk_put(req);
-				reqsk_put(req);
-				spin_lock_bh(&queue->syn_wait_lock);
-			}
-			spin_unlock_bh(&queue->syn_wait_lock);
-		}
-	}
-
-	if (WARN_ON(listen_sock_qlen(lopt) != 0))
-		pr_err("qlen %u\n", listen_sock_qlen(lopt));
-	kvfree(lopt);
 }
 
 /*
@@ -174,7 +100,7 @@
 	struct sock *lsk = req->rsk_listener;
 	struct fastopen_queue *fastopenq;
 
-	fastopenq = inet_csk(lsk)->icsk_accept_queue.fastopenq;
+	fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;
 
 	tcp_sk(sk)->fastopen_rsk = NULL;
 	spin_lock_bh(&fastopenq->lock);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e545229..2477595 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -96,7 +96,7 @@
 EXPORT_SYMBOL(rtnl_is_locked);
 
 #ifdef CONFIG_PROVE_LOCKING
-int lockdep_rtnl_is_held(void)
+bool lockdep_rtnl_is_held(void)
 {
 	return lockdep_is_held(&rtnl_mutex);
 }
@@ -1025,7 +1025,7 @@
 {
 	int err;
 	struct switchdev_attr attr = {
-		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 		.flags = SWITCHDEV_F_NO_RECURSE,
 	};
 
@@ -3047,6 +3047,7 @@
 	u32 portid = NETLINK_CB(cb->skb).portid;
 	u32 seq = cb->nlh->nlmsg_seq;
 	u32 filter_mask = 0;
+	int err;
 
 	if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
 		struct nlattr *extfilt;
@@ -3067,20 +3068,25 @@
 		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
 
 		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
-			if (idx >= cb->args[0] &&
-			    br_dev->netdev_ops->ndo_bridge_getlink(
-				    skb, portid, seq, dev, filter_mask,
-				    NLM_F_MULTI) < 0)
-				break;
+			if (idx >= cb->args[0]) {
+				err = br_dev->netdev_ops->ndo_bridge_getlink(
+						skb, portid, seq, dev,
+						filter_mask, NLM_F_MULTI);
+				if (err < 0 && err != -EOPNOTSUPP)
+					break;
+			}
 			idx++;
 		}
 
 		if (ops->ndo_bridge_getlink) {
-			if (idx >= cb->args[0] &&
-			    ops->ndo_bridge_getlink(skb, portid, seq, dev,
-						    filter_mask,
-						    NLM_F_MULTI) < 0)
-				break;
+			if (idx >= cb->args[0]) {
+				err = ops->ndo_bridge_getlink(skb, portid,
+							      seq, dev,
+							      filter_mask,
+							      NLM_F_MULTI);
+				if (err < 0 && err != -EOPNOTSUPP)
+					break;
+			}
 			idx++;
 		}
 	}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index dad4dd3..fab4599 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2958,11 +2958,12 @@
  */
 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
 {
+	unsigned char *data = skb->data;
+
 	BUG_ON(len > skb->len);
-	skb->len -= len;
-	BUG_ON(skb->len < skb->data_len);
-	skb_postpull_rcsum(skb, skb->data, len);
-	return skb->data += len;
+	__skb_pull(skb, len);
+	skb_postpull_rcsum(skb, data, len);
+	return skb->data;
 }
 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
 
diff --git a/net/core/sock.c b/net/core/sock.c
index ca2984a..dcc7d62 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -988,6 +988,10 @@
 					 sk->sk_max_pacing_rate);
 		break;
 
+	case SO_INCOMING_CPU:
+		sk->sk_incoming_cpu = val;
+		break;
+
 	default:
 		ret = -ENOPROTOOPT;
 		break;
@@ -1852,6 +1856,32 @@
 }
 EXPORT_SYMBOL(sock_alloc_send_skb);
 
+int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
+		   struct sockcm_cookie *sockc)
+{
+	struct cmsghdr *cmsg;
+
+	for_each_cmsghdr(cmsg, msg) {
+		if (!CMSG_OK(msg, cmsg))
+			return -EINVAL;
+		if (cmsg->cmsg_level != SOL_SOCKET)
+			continue;
+		switch (cmsg->cmsg_type) {
+		case SO_MARK:
+			if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+				return -EPERM;
+			if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
+				return -EINVAL;
+			sockc->mark = *(u32 *)CMSG_DATA(cmsg);
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(sock_cmsg_send);
+
 /* On 32bit arches, an skb frag is limited to 2^15 */
 #define SKB_FRAG_PAGE_ORDER	get_order(32768)
 
@@ -2353,6 +2383,7 @@
 
 	sk->sk_max_pacing_rate = ~0U;
 	sk->sk_pacing_rate = ~0U;
+	sk->sk_incoming_cpu = -1;
 	/*
 	 * Before updating sk_refcnt, we must commit prior changes to memory
 	 * (Documentation/RCU/rculist_nulls.txt for details)
@@ -2740,10 +2771,8 @@
 		return;
 	kfree(rsk_prot->slab_name);
 	rsk_prot->slab_name = NULL;
-	if (rsk_prot->slab) {
-		kmem_cache_destroy(rsk_prot->slab);
-		rsk_prot->slab = NULL;
-	}
+	kmem_cache_destroy(rsk_prot->slab);
+	rsk_prot->slab = NULL;
 }
 
 static int req_prot_init(const struct proto *prot)
@@ -2760,7 +2789,7 @@
 
 	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
 					   rsk_prot->obj_size, 0,
-					   0, NULL);
+					   prot->slab_flags, NULL);
 
 	if (!rsk_prot->slab) {
 		pr_crit("%s: Can't create request sock SLAB cache!\n",
@@ -2828,10 +2857,8 @@
 	list_del(&prot->node);
 	mutex_unlock(&proto_list_mutex);
 
-	if (prot->slab != NULL) {
-		kmem_cache_destroy(prot->slab);
-		prot->slab = NULL;
-	}
+	kmem_cache_destroy(prot->slab);
+	prot->slab = NULL;
 
 	req_prot_cleanup(prot->rsk_prot);
 
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 817622f..0c1d58d 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -1,3 +1,5 @@
+/* License: GPL */
+
 #include <linux/mutex.h>
 #include <linux/socket.h>
 #include <linux/skbuff.h>
@@ -323,14 +325,4 @@
 	BUG_ON(!broadcast_wq);
 	return register_pernet_subsys(&diag_net_ops);
 }
-
-static void __exit sock_diag_exit(void)
-{
-	unregister_pernet_subsys(&diag_net_ops);
-	destroy_workqueue(broadcast_wq);
-}
-
-module_init(sock_diag_init);
-module_exit(sock_diag_exit);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG);
+device_initcall(sock_diag_init);
diff --git a/net/core/utils.c b/net/core/utils.c
index 3dffce9..3d17ca8 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -348,52 +348,3 @@
 	}
 }
 EXPORT_SYMBOL(inet_proto_csum_replace_by_diff);
-
-struct __net_random_once_work {
-	struct work_struct work;
-	struct static_key *key;
-};
-
-static void __net_random_once_deferred(struct work_struct *w)
-{
-	struct __net_random_once_work *work =
-		container_of(w, struct __net_random_once_work, work);
-	BUG_ON(!static_key_enabled(work->key));
-	static_key_slow_dec(work->key);
-	kfree(work);
-}
-
-static void __net_random_once_disable_jump(struct static_key *key)
-{
-	struct __net_random_once_work *w;
-
-	w = kmalloc(sizeof(*w), GFP_ATOMIC);
-	if (!w)
-		return;
-
-	INIT_WORK(&w->work, __net_random_once_deferred);
-	w->key = key;
-	schedule_work(&w->work);
-}
-
-bool __net_get_random_once(void *buf, int nbytes, bool *done,
-			   struct static_key *once_key)
-{
-	static DEFINE_SPINLOCK(lock);
-	unsigned long flags;
-
-	spin_lock_irqsave(&lock, flags);
-	if (*done) {
-		spin_unlock_irqrestore(&lock, flags);
-		return false;
-	}
-
-	get_random_bytes(buf, nbytes);
-	*done = true;
-	spin_unlock_irqrestore(&lock, flags);
-
-	__net_random_once_disable_jump(once_key);
-
-	return true;
-}
-EXPORT_SYMBOL(__net_get_random_once);
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 5b21f6f..4f6c186 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -13,6 +13,7 @@
  * You should have received a copy of the GNU General Public License along with
  * this program; if not, see <http://www.gnu.org/licenses/>.
  *
+ * Description: Data Center Bridging netlink interface
  * Author: Lucy Liu <lucy.liu@intel.com>
  */
 
@@ -24,7 +25,7 @@
 #include <linux/dcbnl.h>
 #include <net/dcbevent.h>
 #include <linux/rtnetlink.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <net/sock.h>
 
 /* Data Center Bridging (DCB) is a collection of Ethernet enhancements
@@ -48,10 +49,6 @@
  * features for capable devices.
  */
 
-MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
-MODULE_DESCRIPTION("Data Center Bridging netlink interface");
-MODULE_LICENSE("GPL");
-
 /**************** DCB attribute policies *************************************/
 
 /* DCB netlink attributes policy */
@@ -1935,19 +1932,6 @@
 }
 EXPORT_SYMBOL(dcb_ieee_delapp);
 
-static void dcb_flushapp(void)
-{
-	struct dcb_app_type *app;
-	struct dcb_app_type *tmp;
-
-	spin_lock_bh(&dcb_lock);
-	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
-		list_del(&app->list);
-		kfree(app);
-	}
-	spin_unlock_bh(&dcb_lock);
-}
-
 static int __init dcbnl_init(void)
 {
 	INIT_LIST_HEAD(&dcb_app_list);
@@ -1957,12 +1941,4 @@
 
 	return 0;
 }
-module_init(dcbnl_init);
-
-static void __exit dcbnl_exit(void)
-{
-	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
-	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
-	dcb_flushapp();
-}
-module_exit(dcbnl_exit);
+device_initcall(dcbnl_init);
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index bd9e718..3de0d03 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -398,12 +398,8 @@
 
 void dccp_ackvec_exit(void)
 {
-	if (dccp_ackvec_slab != NULL) {
-		kmem_cache_destroy(dccp_ackvec_slab);
-		dccp_ackvec_slab = NULL;
-	}
-	if (dccp_ackvec_record_slab != NULL) {
-		kmem_cache_destroy(dccp_ackvec_record_slab);
-		dccp_ackvec_record_slab = NULL;
-	}
+	kmem_cache_destroy(dccp_ackvec_slab);
+	dccp_ackvec_slab = NULL;
+	kmem_cache_destroy(dccp_ackvec_record_slab);
+	dccp_ackvec_record_slab = NULL;
 }
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 8349897..90f77d0 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -95,8 +95,7 @@
 
 static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
 {
-	if (slab != NULL)
-		kmem_cache_destroy(slab);
+	kmem_cache_destroy(slab);
 }
 
 static int __init ccid_activate(struct ccid_operations *ccid_ops)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index bebc735..923f5a1 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -229,7 +229,7 @@
 int dccp_retransmit_skb(struct sock *sk);
 
 void dccp_send_ack(struct sock *sk);
-void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			 struct request_sock *rsk);
 
 void dccp_send_sync(struct sock *sk, const u64 seq,
@@ -270,13 +270,13 @@
 
 int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 
-struct sock *dccp_create_openreq_child(struct sock *sk,
+struct sock *dccp_create_openreq_child(const struct sock *sk,
 				       const struct request_sock *req,
 				       const struct sk_buff *skb);
 
 int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 
-struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
+struct sock *dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb,
 				       struct request_sock *req,
 				       struct dst_entry *dst);
 struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
@@ -293,7 +293,7 @@
 void dccp_destroy_sock(struct sock *sk);
 
 void dccp_close(struct sock *sk, long timeout);
-struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
+struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
 				   struct request_sock *req);
 
 int dccp_connect(struct sock *sk);
@@ -325,13 +325,13 @@
 int dccp_invalid_packet(struct sk_buff *skb);
 u32 dccp_sample_rtt(struct sock *sk, long delta);
 
-static inline int dccp_bad_service_code(const struct sock *sk,
+static inline bool dccp_bad_service_code(const struct sock *sk,
 					const __be32 service)
 {
 	const struct dccp_sock *dp = dccp_sk(sk);
 
 	if (dp->dccps_service == service)
-		return 0;
+		return false;
 	return !dccp_list_has_service(dp->dccps_service_list, service);
 }
 
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ccf4c56..8e99681 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -390,7 +390,8 @@
  *
  * This is the equivalent of TCP's tcp_v4_syn_recv_sock
  */
-struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
+struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
+				       struct sk_buff *skb,
 				       struct request_sock *req,
 				       struct dst_entry *dst)
 {
@@ -443,36 +444,6 @@
 }
 EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock);
 
-static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
-{
-	const struct dccp_hdr *dh = dccp_hdr(skb);
-	const struct iphdr *iph = ip_hdr(skb);
-	struct sock *nsk;
-	/* Find possible connection requests. */
-	struct request_sock *req = inet_csk_search_req(sk, dh->dccph_sport,
-						       iph->saddr, iph->daddr);
-	if (req) {
-		nsk = dccp_check_req(sk, skb, req);
-		if (!nsk)
-			reqsk_put(req);
-		return nsk;
-	}
-	nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
-				      iph->saddr, dh->dccph_sport,
-				      iph->daddr, dh->dccph_dport,
-				      inet_iif(skb));
-	if (nsk != NULL) {
-		if (nsk->sk_state != DCCP_TIME_WAIT) {
-			bh_lock_sock(nsk);
-			return nsk;
-		}
-		inet_twsk_put(inet_twsk(nsk));
-		return NULL;
-	}
-
-	return sk;
-}
-
 static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
 					   struct sk_buff *skb)
 {
@@ -498,7 +469,7 @@
 	return &rt->dst;
 }
 
-static int dccp_v4_send_response(struct sock *sk, struct request_sock *req)
+static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req)
 {
 	int err = -1;
 	struct sk_buff *skb;
@@ -527,7 +498,7 @@
 	return err;
 }
 
-static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
+static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 {
 	int err;
 	const struct iphdr *rxiph;
@@ -624,7 +595,7 @@
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
 		goto drop;
 
-	req = inet_reqsk_alloc(&dccp_request_sock_ops, sk);
+	req = inet_reqsk_alloc(&dccp_request_sock_ops, sk, true);
 	if (req == NULL)
 		goto drop;
 
@@ -704,18 +675,6 @@
 	 * NOTE: the check for the packet types is done in
 	 *	 dccp_rcv_state_process
 	 */
-	if (sk->sk_state == DCCP_LISTEN) {
-		struct sock *nsk = dccp_v4_hnd_req(sk, skb);
-
-		if (nsk == NULL)
-			goto discard;
-
-		if (nsk != sk) {
-			if (dccp_child_process(sk, nsk, skb))
-				goto reset;
-			return 0;
-		}
-	}
 
 	if (dccp_rcv_state_process(sk, skb, dh, skb->len))
 		goto reset;
@@ -723,7 +682,6 @@
 
 reset:
 	dccp_v4_ctl_send_reset(sk, skb);
-discard:
 	kfree_skb(skb);
 	return 0;
 }
@@ -867,6 +825,27 @@
 		goto no_dccp_socket;
 	}
 
+	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
+		struct request_sock *req = inet_reqsk(sk);
+		struct sock *nsk = NULL;
+
+		sk = req->rsk_listener;
+		if (sk->sk_state == DCCP_LISTEN)
+			nsk = dccp_check_req(sk, skb, req);
+		if (!nsk) {
+			reqsk_put(req);
+			goto discard_it;
+		}
+		if (nsk == sk) {
+			sock_hold(sk);
+			reqsk_put(req);
+		} else if (dccp_child_process(sk, nsk, skb)) {
+			dccp_v4_ctl_send_reset(sk, skb);
+			goto discard_it;
+		} else {
+			return 0;
+		}
+	}
 	/*
 	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
 	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 5165571..aed314f 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -181,7 +181,7 @@
 }
 
 
-static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
+static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -234,7 +234,7 @@
 	kfree_skb(inet_rsk(req)->pktopts);
 }
 
-static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
+static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 {
 	const struct ipv6hdr *rxip6h;
 	struct sk_buff *skb;
@@ -290,37 +290,6 @@
 	.syn_ack_timeout = dccp_syn_ack_timeout,
 };
 
-static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
-{
-	const struct dccp_hdr *dh = dccp_hdr(skb);
-	const struct ipv6hdr *iph = ipv6_hdr(skb);
-	struct request_sock *req;
-	struct sock *nsk;
-
-	req = inet6_csk_search_req(sk, dh->dccph_sport, &iph->saddr,
-				   &iph->daddr, inet6_iif(skb));
-	if (req) {
-		nsk = dccp_check_req(sk, skb, req);
-		if (!nsk)
-			reqsk_put(req);
-		return nsk;
-	}
-	nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
-					 &iph->saddr, dh->dccph_sport,
-					 &iph->daddr, ntohs(dh->dccph_dport),
-					 inet6_iif(skb));
-	if (nsk != NULL) {
-		if (nsk->sk_state != DCCP_TIME_WAIT) {
-			bh_lock_sock(nsk);
-			return nsk;
-		}
-		inet_twsk_put(inet_twsk(nsk));
-		return NULL;
-	}
-
-	return sk;
-}
-
 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
 	struct request_sock *req;
@@ -350,7 +319,7 @@
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
 		goto drop;
 
-	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk);
+	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
 	if (req == NULL)
 		goto drop;
 
@@ -398,7 +367,7 @@
 	if (dccp_v6_send_response(sk, req))
 		goto drop_and_free;
 
-	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
 	return 0;
 
 drop_and_free:
@@ -408,13 +377,14 @@
 	return -1;
 }
 
-static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
+static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 					      struct sk_buff *skb,
 					      struct request_sock *req,
 					      struct dst_entry *dst)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
-	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+	struct ipv6_pinfo *newnp;
+	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct inet_sock *newinet;
 	struct dccp6_sock *newdp6;
 	struct sock *newsk;
@@ -462,22 +432,11 @@
 	if (sk_acceptq_is_full(sk))
 		goto out_overflow;
 
-	if (dst == NULL) {
-		struct in6_addr *final_p, final;
+	if (!dst) {
 		struct flowi6 fl6;
 
-		memset(&fl6, 0, sizeof(fl6));
-		fl6.flowi6_proto = IPPROTO_DCCP;
-		fl6.daddr = ireq->ir_v6_rmt_addr;
-		final_p = fl6_update_dst(&fl6, np->opt, &final);
-		fl6.saddr = ireq->ir_v6_loc_addr;
-		fl6.flowi6_oif = sk->sk_bound_dev_if;
-		fl6.fl6_dport = ireq->ir_rmt_port;
-		fl6.fl6_sport = htons(ireq->ir_num);
-		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
-		dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
-		if (IS_ERR(dst))
+		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_DCCP);
+		if (!dst)
 			goto out;
 	}
 
@@ -651,24 +610,6 @@
 	 * NOTE: the check for the packet types is done in
 	 *	 dccp_rcv_state_process
 	 */
-	if (sk->sk_state == DCCP_LISTEN) {
-		struct sock *nsk = dccp_v6_hnd_req(sk, skb);
-
-		if (nsk == NULL)
-			goto discard;
-		/*
-		 * Queue it on the new socket if the new socket is active,
-		 * otherwise we just shortcircuit this and continue with
-		 * the new socket..
-		 */
-		if (nsk != sk) {
-			if (dccp_child_process(sk, nsk, skb))
-				goto reset;
-			if (opt_skb != NULL)
-				__kfree_skb(opt_skb);
-			return 0;
-		}
-	}
 
 	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
 		goto reset;
@@ -742,6 +683,27 @@
 		goto no_dccp_socket;
 	}
 
+	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
+		struct request_sock *req = inet_reqsk(sk);
+		struct sock *nsk = NULL;
+
+		sk = req->rsk_listener;
+		if (sk->sk_state == DCCP_LISTEN)
+			nsk = dccp_check_req(sk, skb, req);
+		if (!nsk) {
+			reqsk_put(req);
+			goto discard_it;
+		}
+		if (nsk == sk) {
+			sock_hold(sk);
+			reqsk_put(req);
+		} else if (dccp_child_process(sk, nsk, skb)) {
+			dccp_v6_ctl_send_reset(sk, skb);
+			goto discard_it;
+		} else {
+			return 0;
+		}
+	}
 	/*
 	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
 	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 30addee..d10aace 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -48,8 +48,6 @@
 			tw->tw_ipv6only = sk->sk_ipv6only;
 		}
 #endif
-		/* Linkage updates. */
-		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
 
 		/* Get the TIME_WAIT timeout firing. */
 		if (timeo < rto)
@@ -60,6 +58,8 @@
 			timeo = DCCP_TIMEWAIT_LEN;
 
 		inet_twsk_schedule(tw, timeo);
+		/* Linkage updates. */
+		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
 		inet_twsk_put(tw);
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
@@ -72,7 +72,7 @@
 	dccp_done(sk);
 }
 
-struct sock *dccp_create_openreq_child(struct sock *sk,
+struct sock *dccp_create_openreq_child(const struct sock *sk,
 				       const struct request_sock *req,
 				       const struct sk_buff *skb)
 {
@@ -236,7 +236,7 @@
 
 EXPORT_SYMBOL_GPL(dccp_child_process);
 
-void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			 struct request_sock *rsk)
 {
 	DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 0248e8a..4ce912e 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -390,7 +390,7 @@
 	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
 }
 
-struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
+struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
 				   struct request_sock *req)
 {
 	struct dccp_hdr *dh;
@@ -398,13 +398,18 @@
 	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
 				     sizeof(struct dccp_hdr_ext) +
 				     sizeof(struct dccp_hdr_response);
-	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
-					   GFP_ATOMIC);
-	if (skb == NULL)
+	struct sk_buff *skb;
+
+	/* sk is marked const to clearly express we don't hold the socket lock.
+	 * sock_wmalloc() will atomically change sk->sk_wmem_alloc,
+	 * it is safe to promote sk to non const.
+	 */
+	skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
+			   GFP_ATOMIC);
+	if (!skb)
 		return NULL;
 
-	/* Reserve space for headers. */
-	skb_reserve(skb, sk->sk_prot->max_header);
+	skb_reserve(skb, MAX_DCCP_HEADER);
 
 	skb_dst_set(skb, dst_clone(dst));
 
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 4b02dd3..849805e 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -85,7 +85,7 @@
 	if (dst) {
 try_again:
 		skb_dst_set(skb, dst);
-		dst_output(skb->sk, skb);
+		dst_output(&init_net, skb->sk, skb);
 		return;
 	}
 
@@ -582,7 +582,7 @@
 	 * associations.
 	 */
 	skb_dst_set(skb, dst_clone(dst));
-	dst_output(skb->sk, skb);
+	dst_output(&init_net, skb->sk, skb);
 }
 
 
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index e930321..27fce28 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -744,7 +744,7 @@
 	return NET_RX_DROP;
 }
 
-static int dn_output(struct sock *sk, struct sk_buff *skb)
+static int dn_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct dn_route *rt = (struct dn_route *)dst;
@@ -832,7 +832,7 @@
  * Used to catch bugs. This should never normally get
  * called.
  */
-static int dn_rt_bug_sk(struct sock *sk, struct sk_buff *skb)
+static int dn_rt_bug_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
 
@@ -1469,7 +1469,7 @@
 
 	rt->n = neigh;
 	rt->dst.lastuse = jiffies;
-	rt->dst.output = dn_rt_bug_sk;
+	rt->dst.output = dn_rt_bug_out;
 	switch (res.type) {
 	case RTN_UNICAST:
 		rt->dst.input = dn_forward;
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index af34fc9..85f2fdc 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -87,7 +87,7 @@
 }
 
 
-static unsigned int dnrmg_hook(const struct nf_hook_ops *ops,
+static unsigned int dnrmg_hook(void *priv,
 			struct sk_buff *skb,
 			const struct nf_hook_state *state)
 {
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 76e380076..aa398bc 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -326,8 +326,8 @@
 
 		ret = dsa_slave_create(ds, parent, i, pd->port_names[i]);
 		if (ret < 0) {
-			netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s)\n",
-				   index, i, pd->port_names[i]);
+			netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s): %d\n",
+				   index, i, pd->port_names[i], ret);
 			ret = 0;
 		}
 	}
@@ -634,6 +634,10 @@
 			port_index++;
 		}
 		kfree(pd->chip[i].rtable);
+
+		/* Drop our reference to the MDIO bus device */
+		if (pd->chip[i].host_dev)
+			put_device(pd->chip[i].host_dev);
 	}
 	kfree(pd->chip);
 }
@@ -661,16 +665,22 @@
 		return -EPROBE_DEFER;
 
 	ethernet = of_parse_phandle(np, "dsa,ethernet", 0);
-	if (!ethernet)
-		return -EINVAL;
+	if (!ethernet) {
+		ret = -EINVAL;
+		goto out_put_mdio;
+	}
 
 	ethernet_dev = of_find_net_device_by_node(ethernet);
-	if (!ethernet_dev)
-		return -EPROBE_DEFER;
+	if (!ethernet_dev) {
+		ret = -EPROBE_DEFER;
+		goto out_put_mdio;
+	}
 
 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-	if (!pd)
-		return -ENOMEM;
+	if (!pd) {
+		ret = -ENOMEM;
+		goto out_put_ethernet;
+	}
 
 	dev->platform_data = pd;
 	pd->of_netdev = ethernet_dev;
@@ -691,7 +701,9 @@
 		cd = &pd->chip[chip_index];
 
 		cd->of_node = child;
-		cd->host_dev = &mdio_bus->dev;
+
+		/* When assigning the host device, increment its refcount */
+		cd->host_dev = get_device(&mdio_bus->dev);
 
 		sw_addr = of_get_property(child, "reg", NULL);
 		if (!sw_addr)
@@ -711,6 +723,12 @@
 				ret = -EPROBE_DEFER;
 				goto out_free_chip;
 			}
+
+			/* Drop the mdio_bus device ref, replacing the host
+			 * device with the mdio_bus_switch device, keeping
+			 * the refcount from of_mdio_find_bus() above.
+			 */
+			put_device(cd->host_dev);
 			cd->host_dev = &mdio_bus_switch->dev;
 		}
 
@@ -744,6 +762,10 @@
 		}
 	}
 
+	/* The individual chips hold their own refcount on the mdio bus,
+	 * so drop ours */
+	put_device(&mdio_bus->dev);
+
 	return 0;
 
 out_free_chip:
@@ -751,6 +773,10 @@
 out_free:
 	kfree(pd);
 	dev->platform_data = NULL;
+out_put_ethernet:
+	put_device(&ethernet_dev->dev);
+out_put_mdio:
+	put_device(&mdio_bus->dev);
 	return ret;
 }
 
@@ -762,6 +788,7 @@
 		return;
 
 	dsa_of_free_platform_data(pd);
+	put_device(&pd->of_netdev->dev);
 	kfree(pd);
 }
 #else
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index cce9738..bb2bd3b 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -242,16 +242,15 @@
 }
 
 static int dsa_slave_port_vlan_add(struct net_device *dev,
-				   struct switchdev_obj *obj)
+				   const struct switchdev_obj_port_vlan *vlan,
+				   struct switchdev_trans *trans)
 {
-	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
 	u16 vid;
 	int err;
 
-	switch (obj->trans) {
-	case SWITCHDEV_TRANS_PREPARE:
+	if (switchdev_trans_ph_prepare(trans)) {
 		if (!ds->drv->port_vlan_add || !ds->drv->port_pvid_set)
 			return -EOPNOTSUPP;
 
@@ -263,8 +262,7 @@
 						  vlan->vid_end);
 		if (err)
 			return err;
-		break;
-	case SWITCHDEV_TRANS_COMMIT:
+	} else {
 		for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
 			err = ds->drv->port_vlan_add(ds, p->port, vid,
 						     vlan->flags &
@@ -274,18 +272,14 @@
 			if (err)
 				return err;
 		}
-		break;
-	default:
-		return -EOPNOTSUPP;
 	}
 
 	return 0;
 }
 
 static int dsa_slave_port_vlan_del(struct net_device *dev,
-				   struct switchdev_obj *obj)
+				   const struct switchdev_obj_port_vlan *vlan)
 {
-	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
 	u16 vid;
@@ -304,9 +298,9 @@
 }
 
 static int dsa_slave_port_vlan_dump(struct net_device *dev,
-				    struct switchdev_obj *obj)
+				    struct switchdev_obj_port_vlan *vlan,
+				    switchdev_obj_dump_cb_t *cb)
 {
-	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
 	DECLARE_BITMAP(members, DSA_MAX_PORTS);
@@ -338,7 +332,7 @@
 		if (test_bit(p->port, untagged))
 			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
 
-		err = obj->cb(dev, obj);
+		err = cb(&vlan->obj);
 		if (err)
 			break;
 	}
@@ -347,37 +341,40 @@
 }
 
 static int dsa_slave_port_fdb_add(struct net_device *dev,
-				  struct switchdev_obj *obj)
+				  const struct switchdev_obj_port_fdb *fdb,
+				  struct switchdev_trans *trans)
 {
-	struct switchdev_obj_fdb *fdb = &obj->u.fdb;
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
-	int ret = -EOPNOTSUPP;
+	int ret;
 
-	if (obj->trans == SWITCHDEV_TRANS_PREPARE)
-		ret = ds->drv->port_fdb_add ? 0 : -EOPNOTSUPP;
-	else if (obj->trans == SWITCHDEV_TRANS_COMMIT)
-		ret = ds->drv->port_fdb_add(ds, p->port, fdb->addr, fdb->vid);
+	if (!ds->drv->port_fdb_prepare || !ds->drv->port_fdb_add)
+		return -EOPNOTSUPP;
+
+	if (switchdev_trans_ph_prepare(trans))
+		ret = ds->drv->port_fdb_prepare(ds, p->port, fdb, trans);
+	else
+		ret = ds->drv->port_fdb_add(ds, p->port, fdb, trans);
 
 	return ret;
 }
 
 static int dsa_slave_port_fdb_del(struct net_device *dev,
-				  struct switchdev_obj *obj)
+				  const struct switchdev_obj_port_fdb *fdb)
 {
-	struct switchdev_obj_fdb *fdb = &obj->u.fdb;
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
 	int ret = -EOPNOTSUPP;
 
 	if (ds->drv->port_fdb_del)
-		ret = ds->drv->port_fdb_del(ds, p->port, fdb->addr, fdb->vid);
+		ret = ds->drv->port_fdb_del(ds, p->port, fdb);
 
 	return ret;
 }
 
 static int dsa_slave_port_fdb_dump(struct net_device *dev,
-				   struct switchdev_obj *obj)
+				   struct switchdev_obj_port_fdb *fdb,
+				   switchdev_obj_dump_cb_t *cb)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
@@ -396,11 +393,11 @@
 		if (ret < 0)
 			break;
 
-		obj->u.fdb.addr = addr;
-		obj->u.fdb.vid = vid;
-		obj->u.fdb.ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
+		fdb->addr = addr;
+		fdb->vid = vid;
+		fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
 
-		ret = obj->cb(dev, obj);
+		ret = cb(&fdb->obj);
 		if (ret < 0)
 			break;
 	}
@@ -456,14 +453,20 @@
 }
 
 static int dsa_slave_port_attr_set(struct net_device *dev,
-				   struct switchdev_attr *attr)
+				   struct switchdev_attr *attr,
+				   struct switchdev_trans *trans)
 {
-	int ret = 0;
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+	int ret;
 
 	switch (attr->id) {
-	case SWITCHDEV_ATTR_PORT_STP_STATE:
-		if (attr->trans == SWITCHDEV_TRANS_COMMIT)
-			ret = dsa_slave_stp_update(dev, attr->u.stp_state);
+	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+		if (switchdev_trans_ph_prepare(trans))
+			ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP;
+		else
+			ret = ds->drv->port_stp_update(ds, p->port,
+						       attr->u.stp_state);
 		break;
 	default:
 		ret = -EOPNOTSUPP;
@@ -474,7 +477,8 @@
 }
 
 static int dsa_slave_port_obj_add(struct net_device *dev,
-				  struct switchdev_obj *obj)
+				  const struct switchdev_obj *obj,
+				  struct switchdev_trans *trans)
 {
 	int err;
 
@@ -484,11 +488,15 @@
 	 */
 
 	switch (obj->id) {
-	case SWITCHDEV_OBJ_PORT_FDB:
-		err = dsa_slave_port_fdb_add(dev, obj);
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = dsa_slave_port_fdb_add(dev,
+					     SWITCHDEV_OBJ_PORT_FDB(obj),
+					     trans);
 		break;
-	case SWITCHDEV_OBJ_PORT_VLAN:
-		err = dsa_slave_port_vlan_add(dev, obj);
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = dsa_slave_port_vlan_add(dev,
+					      SWITCHDEV_OBJ_PORT_VLAN(obj),
+					      trans);
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -499,16 +507,18 @@
 }
 
 static int dsa_slave_port_obj_del(struct net_device *dev,
-				  struct switchdev_obj *obj)
+				  const struct switchdev_obj *obj)
 {
 	int err;
 
 	switch (obj->id) {
-	case SWITCHDEV_OBJ_PORT_FDB:
-		err = dsa_slave_port_fdb_del(dev, obj);
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = dsa_slave_port_fdb_del(dev,
+					     SWITCHDEV_OBJ_PORT_FDB(obj));
 		break;
-	case SWITCHDEV_OBJ_PORT_VLAN:
-		err = dsa_slave_port_vlan_del(dev, obj);
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = dsa_slave_port_vlan_del(dev,
+					      SWITCHDEV_OBJ_PORT_VLAN(obj));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -519,16 +529,21 @@
 }
 
 static int dsa_slave_port_obj_dump(struct net_device *dev,
-				   struct switchdev_obj *obj)
+				   struct switchdev_obj *obj,
+				   switchdev_obj_dump_cb_t *cb)
 {
 	int err;
 
 	switch (obj->id) {
-	case SWITCHDEV_OBJ_PORT_FDB:
-		err = dsa_slave_port_fdb_dump(dev, obj);
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = dsa_slave_port_fdb_dump(dev,
+					      SWITCHDEV_OBJ_PORT_FDB(obj),
+					      cb);
 		break;
-	case SWITCHDEV_OBJ_PORT_VLAN:
-		err = dsa_slave_port_vlan_dump(dev, obj);
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = dsa_slave_port_vlan_dump(dev,
+					       SWITCHDEV_OBJ_PORT_VLAN(obj),
+					       cb);
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -582,7 +597,7 @@
 	struct dsa_switch *ds = p->parent;
 
 	switch (attr->id) {
-	case SWITCHDEV_ATTR_PORT_PARENT_ID:
+	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
 		attr->u.ppid.id_len = sizeof(ds->index);
 		memcpy(&attr->u.ppid.id, &ds->index, attr->u.ppid.id_len);
 		break;
@@ -962,6 +977,10 @@
 	.switchdev_port_obj_dump	= dsa_slave_port_obj_dump,
 };
 
+static struct device_type dsa_type = {
+	.name	= "dsa",
+};
+
 static void dsa_slave_adjust_link(struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
@@ -1010,8 +1029,10 @@
 	struct dsa_switch *ds = p->parent;
 
 	p->phy = ds->slave_mii_bus->phy_map[addr];
-	if (!p->phy)
+	if (!p->phy) {
+		netdev_err(slave_dev, "no phy at %d\n", addr);
 		return -ENODEV;
+	}
 
 	/* Use already configured phy mode */
 	if (p->phy_interface == PHY_INTERFACE_MODE_NA)
@@ -1045,7 +1066,7 @@
 		 */
 		ret = of_phy_register_fixed_link(port_dn);
 		if (ret) {
-			netdev_err(slave_dev, "failed to register fixed PHY\n");
+			netdev_err(slave_dev, "failed to register fixed PHY: %d\n", ret);
 			return ret;
 		}
 		phy_is_fixed = true;
@@ -1056,17 +1077,20 @@
 		phy_flags = ds->drv->get_phy_flags(ds, p->port);
 
 	if (phy_dn) {
-		ret = of_mdio_parse_addr(&slave_dev->dev, phy_dn);
+		int phy_id = of_mdio_parse_addr(&slave_dev->dev, phy_dn);
+
 		/* If this PHY address is part of phys_mii_mask, which means
 		 * that we need to divert reads and writes to/from it, then we
 		 * want to bind this device using the slave MII bus created by
 		 * DSA to make that happen.
 		 */
-		if (!phy_is_fixed && ret >= 0 &&
-		    (ds->phys_mii_mask & (1 << ret))) {
-			ret = dsa_slave_phy_connect(p, slave_dev, ret);
-			if (ret)
+		if (!phy_is_fixed && phy_id >= 0 &&
+		    (ds->phys_mii_mask & (1 << phy_id))) {
+			ret = dsa_slave_phy_connect(p, slave_dev, phy_id);
+			if (ret) {
+				netdev_err(slave_dev, "failed to connect to phy%d: %d\n", phy_id, ret);
 				return ret;
+			}
 		} else {
 			p->phy = of_phy_connect(slave_dev, phy_dn,
 						dsa_slave_adjust_link,
@@ -1083,8 +1107,10 @@
 	 */
 	if (!p->phy) {
 		ret = dsa_slave_phy_connect(p, slave_dev, p->port);
-		if (ret)
+		if (ret) {
+			netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret);
 			return ret;
+		}
 	} else {
 		netdev_info(slave_dev, "attached PHY at address %d [%s]\n",
 			    p->phy->addr, p->phy->drv->name);
@@ -1150,6 +1176,7 @@
 	slave_dev->priv_flags |= IFF_NO_QUEUE;
 	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
 	slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
+	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
 
 	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
 				 NULL);
@@ -1195,6 +1222,7 @@
 
 	ret = dsa_slave_phy_setup(p, slave_dev);
 	if (ret) {
+		netdev_err(master, "error %d setting up slave phy\n", ret);
 		free_netdev(slave_dev);
 		return ret;
 	}
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index d25efc9..b6ca089 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -78,7 +78,7 @@
 
 	trailer = skb_tail_pointer(skb) - 4;
 	if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 ||
-	    (trailer[3] & 0xef) != 0x00 || trailer[3] != 0x00)
+	    (trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00)
 		goto out_drop;
 
 	source_port = trailer[1] & 7;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index d850fdc..9e63f25 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -127,7 +127,7 @@
 	struct flow_keys keys;
 
 	/* this should never happen, but better safe than sorry */
-	if (len < sizeof(*eth))
+	if (unlikely(len < sizeof(*eth)))
 		return len;
 
 	/* parse any remaining L2/L3 headers, check for L4 */
diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
index ea339fa9..b4e17a7 100644
--- a/net/ieee802154/6lowpan/6lowpan_i.h
+++ b/net/ieee802154/6lowpan/6lowpan_i.h
@@ -7,6 +7,15 @@
 #include <net/inet_frag.h>
 #include <net/6lowpan.h>
 
+typedef unsigned __bitwise__ lowpan_rx_result;
+#define RX_CONTINUE		((__force lowpan_rx_result) 0u)
+#define RX_DROP_UNUSABLE	((__force lowpan_rx_result) 1u)
+#define RX_DROP			((__force lowpan_rx_result) 2u)
+#define RX_QUEUED		((__force lowpan_rx_result) 3u)
+
+#define LOWPAN_DISPATCH_FRAG1           0xc0
+#define LOWPAN_DISPATCH_FRAGN           0xe0
+
 struct lowpan_create_arg {
 	u16 tag;
 	u16 d_size;
@@ -40,7 +49,7 @@
 
 /* private device info */
 struct lowpan_dev_info {
-	struct net_device	*real_dev; /* real WPAN device ptr */
+	struct net_device	*wdev; /* wpan device ptr */
 	u16			fragment_tag;
 };
 
@@ -62,4 +71,7 @@
 			 const void *_saddr, unsigned int len);
 netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev);
 
+int lowpan_iphc_decompress(struct sk_buff *skb);
+lowpan_rx_result lowpan_rx_h_ipv6(struct sk_buff *skb);
+
 #endif /* __IEEE802154_6LOWPAN_I_H__ */
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 953b1c4..20c49c7 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -61,7 +61,7 @@
 static struct lock_class_key lowpan_tx_busylock;
 static struct lock_class_key lowpan_netdev_xmit_lock_key;
 
-static void lowpan_set_lockdep_class_one(struct net_device *dev,
+static void lowpan_set_lockdep_class_one(struct net_device *ldev,
 					 struct netdev_queue *txq,
 					 void *_unused)
 {
@@ -69,35 +69,47 @@
 			  &lowpan_netdev_xmit_lock_key);
 }
 
-static int lowpan_dev_init(struct net_device *dev)
+static int lowpan_dev_init(struct net_device *ldev)
 {
-	netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
-	dev->qdisc_tx_busylock = &lowpan_tx_busylock;
+	netdev_for_each_tx_queue(ldev, lowpan_set_lockdep_class_one, NULL);
+	ldev->qdisc_tx_busylock = &lowpan_tx_busylock;
+	return 0;
+}
+
+static int lowpan_open(struct net_device *dev)
+{
+	if (!open_count)
+		lowpan_rx_init();
+	open_count++;
+	return 0;
+}
+
+static int lowpan_stop(struct net_device *dev)
+{
+	open_count--;
+	if (!open_count)
+		lowpan_rx_exit();
 	return 0;
 }
 
 static const struct net_device_ops lowpan_netdev_ops = {
 	.ndo_init		= lowpan_dev_init,
 	.ndo_start_xmit		= lowpan_xmit,
+	.ndo_open		= lowpan_open,
+	.ndo_stop		= lowpan_stop,
 };
 
-static void lowpan_setup(struct net_device *dev)
+static void lowpan_setup(struct net_device *ldev)
 {
-	dev->addr_len		= IEEE802154_ADDR_LEN;
-	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
-	dev->type		= ARPHRD_6LOWPAN;
-	/* Frame Control + Sequence Number + Address fields + Security Header */
-	dev->hard_header_len	= 2 + 1 + 20 + 14;
-	dev->needed_tailroom	= 2; /* FCS */
-	dev->mtu		= IPV6_MIN_MTU;
-	dev->priv_flags		|= IFF_NO_QUEUE;
-	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
-	dev->watchdog_timeo	= 0;
+	memset(ldev->broadcast, 0xff, IEEE802154_ADDR_LEN);
+	/* We need an ipv6hdr as minimum len when calling xmit */
+	ldev->hard_header_len	= sizeof(struct ipv6hdr);
+	ldev->flags		= IFF_BROADCAST | IFF_MULTICAST;
 
-	dev->netdev_ops		= &lowpan_netdev_ops;
-	dev->header_ops		= &lowpan_header_ops;
-	dev->destructor		= free_netdev;
-	dev->features		|= NETIF_F_NETNS_LOCAL;
+	ldev->netdev_ops	= &lowpan_netdev_ops;
+	ldev->header_ops	= &lowpan_header_ops;
+	ldev->destructor	= free_netdev;
+	ldev->features		|= NETIF_F_NETNS_LOCAL;
 }
 
 static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -109,10 +121,10 @@
 	return 0;
 }
 
-static int lowpan_newlink(struct net *src_net, struct net_device *dev,
+static int lowpan_newlink(struct net *src_net, struct net_device *ldev,
 			  struct nlattr *tb[], struct nlattr *data[])
 {
-	struct net_device *real_dev;
+	struct net_device *wdev;
 	int ret;
 
 	ASSERT_RTNL();
@@ -120,58 +132,56 @@
 	pr_debug("adding new link\n");
 
 	if (!tb[IFLA_LINK] ||
-	    !net_eq(dev_net(dev), &init_net))
+	    !net_eq(dev_net(ldev), &init_net))
 		return -EINVAL;
-	/* find and hold real wpan device */
-	real_dev = dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
-	if (!real_dev)
+	/* find and hold wpan device */
+	wdev = dev_get_by_index(dev_net(ldev), nla_get_u32(tb[IFLA_LINK]));
+	if (!wdev)
 		return -ENODEV;
-	if (real_dev->type != ARPHRD_IEEE802154) {
-		dev_put(real_dev);
+	if (wdev->type != ARPHRD_IEEE802154) {
+		dev_put(wdev);
 		return -EINVAL;
 	}
 
-	if (real_dev->ieee802154_ptr->lowpan_dev) {
-		dev_put(real_dev);
+	if (wdev->ieee802154_ptr->lowpan_dev) {
+		dev_put(wdev);
 		return -EBUSY;
 	}
 
-	lowpan_dev_info(dev)->real_dev = real_dev;
+	lowpan_dev_info(ldev)->wdev = wdev;
 	/* Set the lowpan hardware address to the wpan hardware address. */
-	memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
+	memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN);
+	/* We need headroom for possible wpan_dev_hard_header call and tailroom
+	 * for encryption/fcs handling. The lowpan interface will replace
+	 * the IPv6 header with a 6LoWPAN header. In the worst case the 6LoWPAN
+	 * header has LOWPAN_IPHC_MAX_HEADER_LEN more bytes than the IPv6
+	 * header.
+	 */
+	ldev->needed_headroom = LOWPAN_IPHC_MAX_HEADER_LEN +
+				wdev->needed_headroom;
+	ldev->needed_tailroom = wdev->needed_tailroom;
 
-	lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154);
+	lowpan_netdev_setup(ldev, LOWPAN_LLTYPE_IEEE802154);
 
-	ret = register_netdevice(dev);
+	ret = register_netdevice(ldev);
 	if (ret < 0) {
-		dev_put(real_dev);
+		dev_put(wdev);
 		return ret;
 	}
 
-	real_dev->ieee802154_ptr->lowpan_dev = dev;
-	if (!open_count)
-		lowpan_rx_init();
-
-	open_count++;
-
+	wdev->ieee802154_ptr->lowpan_dev = ldev;
 	return 0;
 }
 
-static void lowpan_dellink(struct net_device *dev, struct list_head *head)
+static void lowpan_dellink(struct net_device *ldev, struct list_head *head)
 {
-	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
-	struct net_device *real_dev = lowpan_dev->real_dev;
+	struct net_device *wdev = lowpan_dev_info(ldev)->wdev;
 
 	ASSERT_RTNL();
 
-	open_count--;
-
-	if (!open_count)
-		lowpan_rx_exit();
-
-	real_dev->ieee802154_ptr->lowpan_dev = NULL;
-	unregister_netdevice(dev);
-	dev_put(real_dev);
+	wdev->ieee802154_ptr->lowpan_dev = NULL;
+	unregister_netdevice(ldev);
+	dev_put(wdev);
 }
 
 static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
@@ -196,9 +206,9 @@
 static int lowpan_device_event(struct notifier_block *unused,
 			       unsigned long event, void *ptr)
 {
-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct net_device *wdev = netdev_notifier_info_to_dev(ptr);
 
-	if (dev->type != ARPHRD_IEEE802154)
+	if (wdev->type != ARPHRD_IEEE802154)
 		goto out;
 
 	switch (event) {
@@ -207,8 +217,8 @@
 		 * also delete possible lowpan interfaces which belongs
 		 * to the wpan interface.
 		 */
-		if (dev->ieee802154_ptr && dev->ieee802154_ptr->lowpan_dev)
-			lowpan_dellink(dev->ieee802154_ptr->lowpan_dev, NULL);
+		if (wdev->ieee802154_ptr->lowpan_dev)
+			lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL);
 		break;
 	default:
 		break;
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index 214d44a..12e8cf4 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -32,21 +32,10 @@
 
 static const char lowpan_frags_cache_name[] = "lowpan-frags";
 
-struct lowpan_frag_info {
-	u16 d_tag;
-	u16 d_size;
-	u8 d_offset;
-};
-
-static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
-{
-	return (struct lowpan_frag_info *)skb->cb;
-}
-
 static struct inet_frags lowpan_frags;
 
 static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
-			     struct sk_buff *prev, struct net_device *dev);
+			     struct sk_buff *prev, struct net_device *ldev);
 
 static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
 				     const struct ieee802154_addr *saddr,
@@ -111,7 +100,7 @@
 }
 
 static inline struct lowpan_frag_queue *
-fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
+fq_find(struct net *net, const struct lowpan_802154_cb *cb,
 	const struct ieee802154_addr *src,
 	const struct ieee802154_addr *dst)
 {
@@ -121,12 +110,12 @@
 	struct netns_ieee802154_lowpan *ieee802154_lowpan =
 		net_ieee802154_lowpan(net);
 
-	arg.tag = frag_info->d_tag;
-	arg.d_size = frag_info->d_size;
+	arg.tag = cb->d_tag;
+	arg.d_size = cb->d_size;
 	arg.src = src;
 	arg.dst = dst;
 
-	hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);
+	hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);
 
 	q = inet_frag_find(&ieee802154_lowpan->frags,
 			   &lowpan_frags, &arg, hash);
@@ -138,17 +127,17 @@
 }
 
 static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
-			     struct sk_buff *skb, const u8 frag_type)
+			     struct sk_buff *skb, u8 frag_type)
 {
 	struct sk_buff *prev, *next;
-	struct net_device *dev;
+	struct net_device *ldev;
 	int end, offset;
 
 	if (fq->q.flags & INET_FRAG_COMPLETE)
 		goto err;
 
-	offset = lowpan_cb(skb)->d_offset << 3;
-	end = lowpan_cb(skb)->d_size;
+	offset = lowpan_802154_cb(skb)->d_offset << 3;
+	end = lowpan_802154_cb(skb)->d_size;
 
 	/* Is this the final fragment? */
 	if (offset + skb->len == end) {
@@ -174,13 +163,16 @@
 	 * this fragment, right?
 	 */
 	prev = fq->q.fragments_tail;
-	if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
+	if (!prev ||
+	    lowpan_802154_cb(prev)->d_offset <
+	    lowpan_802154_cb(skb)->d_offset) {
 		next = NULL;
 		goto found;
 	}
 	prev = NULL;
 	for (next = fq->q.fragments; next != NULL; next = next->next) {
-		if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
+		if (lowpan_802154_cb(next)->d_offset >=
+		    lowpan_802154_cb(skb)->d_offset)
 			break;	/* bingo! */
 		prev = next;
 	}
@@ -195,18 +187,15 @@
 	else
 		fq->q.fragments = skb;
 
-	dev = skb->dev;
-	if (dev)
+	ldev = skb->dev;
+	if (ldev)
 		skb->dev = NULL;
 
 	fq->q.stamp = skb->tstamp;
-	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
-		/* Calculate uncomp. 6lowpan header to estimate full size */
-		fq->q.meat += lowpan_uncompress_size(skb, NULL);
+	if (frag_type == LOWPAN_DISPATCH_FRAG1)
 		fq->q.flags |= INET_FRAG_FIRST_IN;
-	} else {
-		fq->q.meat += skb->len;
-	}
+
+	fq->q.meat += skb->len;
 	add_frag_mem_limit(fq->q.net, skb->truesize);
 
 	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
@@ -215,7 +204,7 @@
 		unsigned long orefdst = skb->_skb_refdst;
 
 		skb->_skb_refdst = 0UL;
-		res = lowpan_frag_reasm(fq, prev, dev);
+		res = lowpan_frag_reasm(fq, prev, ldev);
 		skb->_skb_refdst = orefdst;
 		return res;
 	}
@@ -235,7 +224,7 @@
  *	the last and the first frames arrived and all the bits are here.
  */
 static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
-			     struct net_device *dev)
+			     struct net_device *ldev)
 {
 	struct sk_buff *fp, *head = fq->q.fragments;
 	int sum_truesize;
@@ -313,7 +302,7 @@
 	sub_frag_mem_limit(fq->q.net, sum_truesize);
 
 	head->next = NULL;
-	head->dev = dev;
+	head->dev = ldev;
 	head->tstamp = fq->q.stamp;
 
 	fq->q.fragments = NULL;
@@ -325,24 +314,87 @@
 	return -1;
 }
 
-static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
-				struct lowpan_frag_info *frag_info)
+static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
+					  lowpan_rx_result res)
+{
+	switch (res) {
+	case RX_QUEUED:
+		return NET_RX_SUCCESS;
+	case RX_CONTINUE:
+		/* nobody cared about this packet */
+		net_warn_ratelimited("%s: received unknown dispatch\n",
+				     __func__);
+
+		/* fall-through */
+	default:
+		/* all others are failures */
+		return NET_RX_DROP;
+	}
+}
+
+static lowpan_rx_result lowpan_frag_rx_h_iphc(struct sk_buff *skb)
+{
+	int ret;
+
+	if (!lowpan_is_iphc(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	ret = lowpan_iphc_decompress(skb);
+	if (ret < 0)
+		return RX_DROP;
+
+	return RX_QUEUED;
+}
+
+static int lowpan_invoke_frag_rx_handlers(struct sk_buff *skb)
+{
+	lowpan_rx_result res;
+
+#define CALL_RXH(rxh)			\
+	do {				\
+		res = rxh(skb);	\
+		if (res != RX_CONTINUE)	\
+			goto rxh_next;	\
+	} while (0)
+
+	/* likely at first */
+	CALL_RXH(lowpan_frag_rx_h_iphc);
+	CALL_RXH(lowpan_rx_h_ipv6);
+
+rxh_next:
+	return lowpan_frag_rx_handlers_result(skb, res);
+#undef CALL_RXH
+}
+
+#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK	0x07
+#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT	8
+
+static int lowpan_get_cb(struct sk_buff *skb, u8 frag_type,
+			 struct lowpan_802154_cb *cb)
 {
 	bool fail;
-	u8 pattern = 0, low = 0;
+	u8 high = 0, low = 0;
 	__be16 d_tag = 0;
 
-	fail = lowpan_fetch_skb(skb, &pattern, 1);
+	fail = lowpan_fetch_skb(skb, &high, 1);
 	fail |= lowpan_fetch_skb(skb, &low, 1);
-	frag_info->d_size = (pattern & 7) << 8 | low;
+	/* remove the dispatch value and use first three bits as high value
+	 * for the datagram size
+	 */
+	cb->d_size = (high & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK) <<
+		LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT | low;
 	fail |= lowpan_fetch_skb(skb, &d_tag, 2);
-	frag_info->d_tag = ntohs(d_tag);
+	cb->d_tag = ntohs(d_tag);
 
 	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
-		fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
+		fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
 	} else {
 		skb_reset_network_header(skb);
-		frag_info->d_offset = 0;
+		cb->d_offset = 0;
+		/* check that datagram_size covers at least an ipv6hdr on FRAG1 */
+		fail |= cb->d_size < sizeof(struct ipv6hdr);
+		/* check if we can dereference the dispatch value */
+		fail |= !skb->len;
 	}
 
 	if (unlikely(fail))
@@ -351,27 +403,33 @@
 	return 0;
 }
 
-int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
+int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
 {
 	struct lowpan_frag_queue *fq;
 	struct net *net = dev_net(skb->dev);
-	struct lowpan_frag_info *frag_info = lowpan_cb(skb);
-	struct ieee802154_addr source, dest;
+	struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
+	struct ieee802154_hdr hdr;
 	int err;
 
-	source = mac_cb(skb)->source;
-	dest = mac_cb(skb)->dest;
+	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
+		goto err;
 
-	err = lowpan_get_frag_info(skb, frag_type, frag_info);
+	err = lowpan_get_cb(skb, frag_type, cb);
 	if (err < 0)
 		goto err;
 
-	if (frag_info->d_size > IPV6_MIN_MTU) {
+	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
+		err = lowpan_invoke_frag_rx_handlers(skb);
+		if (err == NET_RX_DROP)
+			goto err;
+	}
+
+	if (cb->d_size > IPV6_MIN_MTU) {
 		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
 		goto err;
 	}
 
-	fq = fq_find(net, frag_info, &source, &dest);
+	fq = fq_find(net, cb, &hdr.source, &hdr.dest);
 	if (fq != NULL) {
 		int ret;
 
@@ -387,7 +445,6 @@
 	kfree_skb(skb);
 	return -1;
 }
-EXPORT_SYMBOL(lowpan_frag_rcv);
 
 #ifdef CONFIG_SYSCTL
 static int zero;
diff --git a/net/ieee802154/6lowpan/rx.c b/net/ieee802154/6lowpan/rx.c
index 12e1020..65d55e0 100644
--- a/net/ieee802154/6lowpan/rx.c
+++ b/net/ieee802154/6lowpan/rx.c
@@ -11,40 +11,101 @@
 #include <linux/if_arp.h>
 
 #include <net/6lowpan.h>
+#include <net/mac802154.h>
 #include <net/ieee802154_netdev.h>
 
 #include "6lowpan_i.h"
 
-static int lowpan_give_skb_to_device(struct sk_buff *skb,
-				     struct net_device *dev)
+#define LOWPAN_DISPATCH_FIRST		0xc0
+#define LOWPAN_DISPATCH_FRAG_MASK	0xf8
+
+#define LOWPAN_DISPATCH_NALP		0x00
+#define LOWPAN_DISPATCH_ESC		0x40
+#define LOWPAN_DISPATCH_HC1		0x42
+#define LOWPAN_DISPATCH_DFF		0x43
+#define LOWPAN_DISPATCH_BC0		0x50
+#define LOWPAN_DISPATCH_MESH		0x80
+
+static int lowpan_give_skb_to_device(struct sk_buff *skb)
 {
-	skb->dev = dev->ieee802154_ptr->lowpan_dev;
 	skb->protocol = htons(ETH_P_IPV6);
-	skb->pkt_type = PACKET_HOST;
+	skb->dev->stats.rx_packets++;
+	skb->dev->stats.rx_bytes += skb->len;
 
 	return netif_rx(skb);
 }
 
-static int
-iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
+static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res)
 {
-	u8 iphc0, iphc1;
+	switch (res) {
+	case RX_CONTINUE:
+		/* nobody cared about this packet */
+		net_warn_ratelimited("%s: received unknown dispatch\n",
+				     __func__);
+
+		/* fall-through */
+	case RX_DROP_UNUSABLE:
+		kfree_skb(skb);
+
+		/* fall-through */
+	case RX_DROP:
+		return NET_RX_DROP;
+	case RX_QUEUED:
+		return lowpan_give_skb_to_device(skb);
+	default:
+		break;
+	}
+
+	return NET_RX_DROP;
+}
+
+static inline bool lowpan_is_frag1(u8 dispatch)
+{
+	return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAG1;
+}
+
+static inline bool lowpan_is_fragn(u8 dispatch)
+{
+	return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAGN;
+}
+
+static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb)
+{
+	int ret;
+
+	if (!(lowpan_is_frag1(*skb_network_header(skb)) ||
+	      lowpan_is_fragn(*skb_network_header(skb))))
+		return RX_CONTINUE;
+
+	ret = lowpan_frag_rcv(skb, *skb_network_header(skb) &
+			      LOWPAN_DISPATCH_FRAG_MASK);
+	if (ret == 1)
+		return RX_QUEUED;
+
+	/* Packet is freed by lowpan_frag_rcv on error or put into the frag
+	 * bucket.
+	 */
+	return RX_DROP;
+}
+
+int lowpan_iphc_decompress(struct sk_buff *skb)
+{
 	struct ieee802154_addr_sa sa, da;
+	struct ieee802154_hdr hdr;
+	u8 iphc0, iphc1;
 	void *sap, *dap;
 
+	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
+		return -EINVAL;
+
 	raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
-	/* at least two bytes will be used for the encoding */
-	if (skb->len < 2)
+
+	if (lowpan_fetch_skb_u8(skb, &iphc0) ||
+	    lowpan_fetch_skb_u8(skb, &iphc1))
 		return -EINVAL;
 
-	if (lowpan_fetch_skb_u8(skb, &iphc0))
-		return -EINVAL;
-
-	if (lowpan_fetch_skb_u8(skb, &iphc1))
-		return -EINVAL;
-
-	ieee802154_addr_to_sa(&sa, &hdr->source);
-	ieee802154_addr_to_sa(&da, &hdr->dest);
+	ieee802154_addr_to_sa(&sa, &hdr.source);
+	ieee802154_addr_to_sa(&da, &hdr.dest);
 
 	if (sa.addr_type == IEEE802154_ADDR_SHORT)
 		sap = &sa.short_addr;
@@ -61,77 +122,216 @@
 					IEEE802154_ADDR_LEN, iphc0, iphc1);
 }
 
-static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
-		      struct packet_type *pt, struct net_device *orig_dev)
+static lowpan_rx_result lowpan_rx_h_iphc(struct sk_buff *skb)
 {
-	struct ieee802154_hdr hdr;
 	int ret;
 
-	if (dev->type != ARPHRD_IEEE802154 ||
-	    !dev->ieee802154_ptr->lowpan_dev)
-		goto drop;
+	if (!lowpan_is_iphc(*skb_network_header(skb)))
+		return RX_CONTINUE;
 
+	/* Setting the datagram size (d_size) to zero indicates non-fragment
+	 * handling while doing lowpan_header_decompress.
+	 */
+	lowpan_802154_cb(skb)->d_size = 0;
+
+	ret = lowpan_iphc_decompress(skb);
+	if (ret < 0)
+		return RX_DROP_UNUSABLE;
+
+	return RX_QUEUED;
+}
+
+lowpan_rx_result lowpan_rx_h_ipv6(struct sk_buff *skb)
+{
+	if (!lowpan_is_ipv6(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	/* Pull off the 1-byte of 6lowpan header. */
+	skb_pull(skb, 1);
+	return RX_QUEUED;
+}
+
+static inline bool lowpan_is_esc(u8 dispatch)
+{
+	return dispatch == LOWPAN_DISPATCH_ESC;
+}
+
+static lowpan_rx_result lowpan_rx_h_esc(struct sk_buff *skb)
+{
+	if (!lowpan_is_esc(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	net_warn_ratelimited("%s: %s\n", skb->dev->name,
+			     "6LoWPAN ESC not supported\n");
+
+	return RX_DROP_UNUSABLE;
+}
+
+static inline bool lowpan_is_hc1(u8 dispatch)
+{
+	return dispatch == LOWPAN_DISPATCH_HC1;
+}
+
+static lowpan_rx_result lowpan_rx_h_hc1(struct sk_buff *skb)
+{
+	if (!lowpan_is_hc1(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	net_warn_ratelimited("%s: %s\n", skb->dev->name,
+			     "6LoWPAN HC1 not supported\n");
+
+	return RX_DROP_UNUSABLE;
+}
+
+static inline bool lowpan_is_dff(u8 dispatch)
+{
+	return dispatch == LOWPAN_DISPATCH_DFF;
+}
+
+static lowpan_rx_result lowpan_rx_h_dff(struct sk_buff *skb)
+{
+	if (!lowpan_is_dff(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	net_warn_ratelimited("%s: %s\n", skb->dev->name,
+			     "6LoWPAN DFF not supported\n");
+
+	return RX_DROP_UNUSABLE;
+}
+
+static inline bool lowpan_is_bc0(u8 dispatch)
+{
+	return dispatch == LOWPAN_DISPATCH_BC0;
+}
+
+static lowpan_rx_result lowpan_rx_h_bc0(struct sk_buff *skb)
+{
+	if (!lowpan_is_bc0(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	net_warn_ratelimited("%s: %s\n", skb->dev->name,
+			     "6LoWPAN BC0 not supported\n");
+
+	return RX_DROP_UNUSABLE;
+}
+
+static inline bool lowpan_is_mesh(u8 dispatch)
+{
+	return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_MESH;
+}
+
+static lowpan_rx_result lowpan_rx_h_mesh(struct sk_buff *skb)
+{
+	if (!lowpan_is_mesh(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	net_warn_ratelimited("%s: %s\n", skb->dev->name,
+			     "6LoWPAN MESH not supported\n");
+
+	return RX_DROP_UNUSABLE;
+}
+
+static int lowpan_invoke_rx_handlers(struct sk_buff *skb)
+{
+	lowpan_rx_result res;
+
+#define CALL_RXH(rxh)			\
+	do {				\
+		res = rxh(skb);	\
+		if (res != RX_CONTINUE)	\
+			goto rxh_next;	\
+	} while (0)
+
+	/* most likely dispatch types come first */
+	CALL_RXH(lowpan_rx_h_iphc);
+	CALL_RXH(lowpan_rx_h_frag);
+	CALL_RXH(lowpan_rx_h_ipv6);
+	CALL_RXH(lowpan_rx_h_esc);
+	CALL_RXH(lowpan_rx_h_hc1);
+	CALL_RXH(lowpan_rx_h_dff);
+	CALL_RXH(lowpan_rx_h_bc0);
+	CALL_RXH(lowpan_rx_h_mesh);
+
+rxh_next:
+	return lowpan_rx_handlers_result(skb, res);
+#undef CALL_RXH
+}
+
+static inline bool lowpan_is_nalp(u8 dispatch)
+{
+	return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_NALP;
+}
+
+/* Look up the reserved dispatch values at:
+ * https://www.iana.org/assignments/_6lowpan-parameters/_6lowpan-parameters.xhtml#_6lowpan-parameters-1
+ *
+ * Last Updated: 2015-01-22
+ */
+static inline bool lowpan_is_reserved(u8 dispatch)
+{
+	return ((dispatch >= 0x44 && dispatch <= 0x4F) ||
+		(dispatch >= 0x51 && dispatch <= 0x5F) ||
+		(dispatch >= 0xc8 && dispatch <= 0xdf) ||
+		(dispatch >= 0xe8 && dispatch <= 0xff));
+}
+
+/* lowpan_rx_h_check checks generic 6LoWPAN requirements in the MAC and
+ * 6LoWPAN headers.
+ *
+ * Don't manipulate the skb here; it could be a shared buffer.
+ */
+static inline bool lowpan_rx_h_check(struct sk_buff *skb)
+{
+	__le16 fc = ieee802154_get_fc_from_skb(skb);
+
+	/* check on ieee802154 conform 6LoWPAN header */
+	if (!ieee802154_is_data(fc) ||
+	    !ieee802154_is_intra_pan(fc))
+		return false;
+
+	/* check if we can dereference the dispatch */
+	if (unlikely(!skb->len))
+		return false;
+
+	if (lowpan_is_nalp(*skb_network_header(skb)) ||
+	    lowpan_is_reserved(*skb_network_header(skb)))
+		return false;
+
+	return true;
+}
+
+static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
+		      struct packet_type *pt, struct net_device *orig_wdev)
+{
+	struct net_device *ldev;
+
+	if (wdev->type != ARPHRD_IEEE802154 ||
+	    skb->pkt_type == PACKET_OTHERHOST ||
+	    !lowpan_rx_h_check(skb))
+		return NET_RX_DROP;
+
+	ldev = wdev->ieee802154_ptr->lowpan_dev;
+	if (!ldev || !netif_running(ldev))
+		return NET_RX_DROP;
+
+	/* Replacing skb->dev and the following rx handlers will manipulate the skb. */
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (!skb)
-		goto drop;
+		return NET_RX_DROP;
+	skb->dev = ldev;
 
-	if (!netif_running(dev))
-		goto drop_skb;
-
-	if (skb->pkt_type == PACKET_OTHERHOST)
-		goto drop_skb;
-
-	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
-		goto drop_skb;
-
-	/* check that it's our buffer */
-	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
-		/* Pull off the 1-byte of 6lowpan header. */
-		skb_pull(skb, 1);
-		return lowpan_give_skb_to_device(skb, dev);
-	} else {
-		switch (skb->data[0] & 0xe0) {
-		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
-			ret = iphc_decompress(skb, &hdr);
-			if (ret < 0)
-				goto drop_skb;
-
-			return lowpan_give_skb_to_device(skb, dev);
-		case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
-			ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
-			if (ret == 1) {
-				ret = iphc_decompress(skb, &hdr);
-				if (ret < 0)
-					goto drop_skb;
-
-				return lowpan_give_skb_to_device(skb, dev);
-			} else if (ret == -1) {
-				return NET_RX_DROP;
-			} else {
-				return NET_RX_SUCCESS;
-			}
-		case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
-			ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN);
-			if (ret == 1) {
-				ret = iphc_decompress(skb, &hdr);
-				if (ret < 0)
-					goto drop_skb;
-
-				return lowpan_give_skb_to_device(skb, dev);
-			} else if (ret == -1) {
-				return NET_RX_DROP;
-			} else {
-				return NET_RX_SUCCESS;
-			}
-		default:
-			break;
-		}
+	/* When we receive a frag1 frame we will likely manipulate the buffer.
+	 * When we receive an iphc frame we manipulate the data buffer. So we
+	 * need to unshare the buffer.
+	 */
+	if (lowpan_is_frag1(*skb_network_header(skb)) ||
+	    lowpan_is_iphc(*skb_network_header(skb))) {
+		skb = skb_unshare(skb, GFP_ATOMIC);
+		if (!skb)
+			return NET_RX_DROP;
 	}
 
-drop_skb:
-	kfree_skb(skb);
-drop:
-	return NET_RX_DROP;
+	return lowpan_invoke_rx_handlers(skb);
 }
 
 static struct packet_type lowpan_packet_type = {
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index f6263fc..62a21f6 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -10,6 +10,7 @@
 
 #include <net/6lowpan.h>
 #include <net/ieee802154_netdev.h>
+#include <net/mac802154.h>
 
 #include "6lowpan_i.h"
 
@@ -36,7 +37,14 @@
 			sizeof(struct lowpan_addr_info));
 }
 
-int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
+/* This callback will be called from the AF_PACKET and IPv6 stacks; the
+ * AF_PACKET sockets give an 8-byte array for addresses only!
+ *
+ * TODO I think AF_PACKET DGRAM (sending/receiving) and RAW (sending) make no
+ * sense here. We should disable it; the right use case would be AF_INET6
+ * RAW/DGRAM sockets.
+ */
+int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
 			 unsigned short type, const void *_daddr,
 			 const void *_saddr, unsigned int len)
 {
@@ -51,7 +59,7 @@
 		return 0;
 
 	if (!saddr)
-		saddr = dev->dev_addr;
+		saddr = ldev->dev_addr;
 
 	raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
 	raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
@@ -71,28 +79,33 @@
 
 static struct sk_buff*
 lowpan_alloc_frag(struct sk_buff *skb, int size,
-		  const struct ieee802154_hdr *master_hdr)
+		  const struct ieee802154_hdr *master_hdr, bool frag1)
 {
-	struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
+	struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev;
 	struct sk_buff *frag;
 	int rc;
 
-	frag = alloc_skb(real_dev->hard_header_len +
-			 real_dev->needed_tailroom + size,
+	frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size,
 			 GFP_ATOMIC);
 
 	if (likely(frag)) {
-		frag->dev = real_dev;
+		frag->dev = wdev;
 		frag->priority = skb->priority;
-		skb_reserve(frag, real_dev->hard_header_len);
+		skb_reserve(frag, wdev->needed_headroom);
 		skb_reset_network_header(frag);
 		*mac_cb(frag) = *mac_cb(skb);
 
-		rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
-				     &master_hdr->source, size);
-		if (rc < 0) {
-			kfree_skb(frag);
-			return ERR_PTR(rc);
+		if (frag1) {
+			memcpy(skb_put(frag, skb->mac_len),
+			       skb_mac_header(skb), skb->mac_len);
+		} else {
+			rc = wpan_dev_hard_header(frag, wdev,
+						  &master_hdr->dest,
+						  &master_hdr->source, size);
+			if (rc < 0) {
+				kfree_skb(frag);
+				return ERR_PTR(rc);
+			}
 		}
 	} else {
 		frag = ERR_PTR(-ENOMEM);
@@ -104,13 +117,13 @@
 static int
 lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
 		     u8 *frag_hdr, int frag_hdrlen,
-		     int offset, int len)
+		     int offset, int len, bool frag1)
 {
 	struct sk_buff *frag;
 
 	raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
 
-	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
+	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1);
 	if (IS_ERR(frag))
 		return PTR_ERR(frag);
 
@@ -123,19 +136,17 @@
 }
 
 static int
-lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
-		       const struct ieee802154_hdr *wpan_hdr)
+lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
+		       const struct ieee802154_hdr *wpan_hdr, u16 dgram_size,
+		       u16 dgram_offset)
 {
-	u16 dgram_size, dgram_offset;
 	__be16 frag_tag;
 	u8 frag_hdr[5];
 	int frag_cap, frag_len, payload_cap, rc;
 	int skb_unprocessed, skb_offset;
 
-	dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
-		     skb->mac_len;
-	frag_tag = htons(lowpan_dev_info(dev)->fragment_tag);
-	lowpan_dev_info(dev)->fragment_tag++;
+	frag_tag = htons(lowpan_dev_info(ldev)->fragment_tag);
+	lowpan_dev_info(ldev)->fragment_tag++;
 
 	frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
 	frag_hdr[1] = dgram_size & 0xff;
@@ -151,7 +162,8 @@
 
 	rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
 				  LOWPAN_FRAG1_HEAD_SIZE, 0,
-				  frag_len + skb_network_header_len(skb));
+				  frag_len + skb_network_header_len(skb),
+				  true);
 	if (rc) {
 		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
 			 __func__, ntohs(frag_tag));
@@ -172,7 +184,7 @@
 
 		rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
 					  LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
-					  frag_len);
+					  frag_len, false);
 		if (rc) {
 			pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
 				 __func__, ntohs(frag_tag), skb_offset);
@@ -180,6 +192,8 @@
 		}
 	} while (skb_unprocessed > frag_cap);
 
+	ldev->stats.tx_packets++;
+	ldev->stats.tx_bytes += dgram_size;
 	consume_skb(skb);
 	return NET_XMIT_SUCCESS;
 
@@ -188,9 +202,10 @@
 	return rc;
 }
 
-static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
+static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
+			 u16 *dgram_size, u16 *dgram_offset)
 {
-	struct wpan_dev *wpan_dev = lowpan_dev_info(dev)->real_dev->ieee802154_ptr;
+	struct wpan_dev *wpan_dev = lowpan_dev_info(ldev)->wdev->ieee802154_ptr;
 	struct ieee802154_addr sa, da;
 	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
 	struct lowpan_addr_info info;
@@ -202,7 +217,10 @@
 	daddr = &info.daddr.u.extended_addr;
 	saddr = &info.saddr.u.extended_addr;
 
-	lowpan_header_compress(skb, dev, ETH_P_IPV6, daddr, saddr, skb->len);
+	*dgram_size = skb->len;
+	lowpan_header_compress(skb, ldev, ETH_P_IPV6, daddr, saddr, skb->len);
+	/* dgram_offset = (saved bytes after compression) + lowpan header len */
+	*dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);
 
 	cb->type = IEEE802154_FC_TYPE_DATA;
 
@@ -227,17 +245,20 @@
 		cb->ackreq = wpan_dev->ackreq;
 	}
 
-	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
-			ETH_P_IPV6, (void *)&da, (void *)&sa, 0);
+	return wpan_dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, &da, &sa,
+				    0);
 }
 
-netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
 {
 	struct ieee802154_hdr wpan_hdr;
 	int max_single, ret;
+	u16 dgram_size, dgram_offset;
 
 	pr_debug("package xmit\n");
 
+	WARN_ON_ONCE(skb->len > IPV6_MIN_MTU);
+
 	/* We must take a copy of the skb before we modify/replace the ipv6
 	 * header as the header could be used elsewhere
 	 */
@@ -245,7 +266,7 @@
 	if (!skb)
 		return NET_XMIT_DROP;
 
-	ret = lowpan_header(skb, dev);
+	ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
 	if (ret < 0) {
 		kfree_skb(skb);
 		return NET_XMIT_DROP;
@@ -259,13 +280,16 @@
 	max_single = ieee802154_max_payload(&wpan_hdr);
 
 	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
-		skb->dev = lowpan_dev_info(dev)->real_dev;
+		skb->dev = lowpan_dev_info(ldev)->wdev;
+		ldev->stats.tx_packets++;
+		ldev->stats.tx_bytes += dgram_size;
 		return dev_queue_xmit(skb);
 	} else {
 		netdev_tx_t rc;
 
 		pr_debug("frame is too big, fragmentation is needed\n");
-		rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
+		rc = lowpan_xmit_fragmented(skb, ldev, &wpan_hdr, dgram_size,
+					    dgram_offset);
 
 		return rc < 0 ? NET_XMIT_DROP : rc;
 	}
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig
index 1370d5b..188135b 100644
--- a/net/ieee802154/Kconfig
+++ b/net/ieee802154/Kconfig
@@ -12,6 +12,11 @@
 
 if IEEE802154
 
+config IEEE802154_NL802154_EXPERIMENTAL
+	bool "IEEE 802.15.4 experimental netlink support"
+	---help---
+	  Adds experimental netlink support for nl802154.
+
 config IEEE802154_SOCKET
 	tristate "IEEE 802.15.4 socket interface"
 	default y
diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c
index b0248e9..c35fdfa 100644
--- a/net/ieee802154/core.c
+++ b/net/ieee802154/core.c
@@ -95,6 +95,18 @@
 	return result;
 }
 
+struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx)
+{
+	struct cfg802154_registered_device *rdev;
+
+	ASSERT_RTNL();
+
+	rdev = cfg802154_rdev_by_wpan_phy_idx(wpan_phy_idx);
+	if (!rdev)
+		return NULL;
+	return &rdev->wpan_phy;
+}
+
 struct wpan_phy *
 wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
 {
diff --git a/net/ieee802154/core.h b/net/ieee802154/core.h
index f3e9558..231fade 100644
--- a/net/ieee802154/core.h
+++ b/net/ieee802154/core.h
@@ -42,5 +42,6 @@
 void cfg802154_dev_free(struct cfg802154_registered_device *rdev);
 struct cfg802154_registered_device *
 cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx);
+struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx);
 
 #endif /* __IEEE802154_CORE_H */
diff --git a/net/ieee802154/header_ops.c b/net/ieee802154/header_ops.c
index a051b69..c7439f0 100644
--- a/net/ieee802154/header_ops.c
+++ b/net/ieee802154/header_ops.c
@@ -83,35 +83,35 @@
 }
 
 int
-ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
+ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr)
 {
-	u8 buf[MAC802154_FRAME_HARD_HEADER_LEN];
+	u8 buf[IEEE802154_MAX_HEADER_LEN];
 	int pos = 2;
 	int rc;
-	struct ieee802154_hdr_fc fc = hdr->fc;
+	struct ieee802154_hdr_fc *fc = &hdr->fc;
 
 	buf[pos++] = hdr->seq;
 
-	fc.dest_addr_mode = hdr->dest.mode;
+	fc->dest_addr_mode = hdr->dest.mode;
 
 	rc = ieee802154_hdr_push_addr(buf + pos, &hdr->dest, false);
 	if (rc < 0)
 		return -EINVAL;
 	pos += rc;
 
-	fc.source_addr_mode = hdr->source.mode;
+	fc->source_addr_mode = hdr->source.mode;
 
 	if (hdr->source.pan_id == hdr->dest.pan_id &&
 	    hdr->dest.mode != IEEE802154_ADDR_NONE)
-		fc.intra_pan = true;
+		fc->intra_pan = true;
 
-	rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc.intra_pan);
+	rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc->intra_pan);
 	if (rc < 0)
 		return -EINVAL;
 	pos += rc;
 
-	if (fc.security_enabled) {
-		fc.version = 1;
+	if (fc->security_enabled) {
+		fc->version = 1;
 
 		rc = ieee802154_hdr_push_sechdr(buf + pos, &hdr->sec);
 		if (rc < 0)
@@ -120,7 +120,7 @@
 		pos += rc;
 	}
 
-	memcpy(buf, &fc, 2);
+	memcpy(buf, fc, 2);
 
 	memcpy(skb_push(skb, pos), buf, pos);
 
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 3f89c0a..16ef0d9 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -232,8 +232,86 @@
 	[NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED },
 
 	[NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 },
+
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	[NL802154_ATTR_SEC_ENABLED] = { .type = NLA_U8, },
+	[NL802154_ATTR_SEC_OUT_LEVEL] = { .type = NLA_U32, },
+	[NL802154_ATTR_SEC_OUT_KEY_ID] = { .type = NLA_NESTED, },
+	[NL802154_ATTR_SEC_FRAME_COUNTER] = { .type = NLA_U32 },
+
+	[NL802154_ATTR_SEC_LEVEL] = { .type = NLA_NESTED },
+	[NL802154_ATTR_SEC_DEVICE] = { .type = NLA_NESTED },
+	[NL802154_ATTR_SEC_DEVKEY] = { .type = NLA_NESTED },
+	[NL802154_ATTR_SEC_KEY] = { .type = NLA_NESTED },
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 };
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+static int
+nl802154_prepare_wpan_dev_dump(struct sk_buff *skb,
+			       struct netlink_callback *cb,
+			       struct cfg802154_registered_device **rdev,
+			       struct wpan_dev **wpan_dev)
+{
+	int err;
+
+	rtnl_lock();
+
+	if (!cb->args[0]) {
+		err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl802154_fam.hdrsize,
+				  nl802154_fam.attrbuf, nl802154_fam.maxattr,
+				  nl802154_policy);
+		if (err)
+			goto out_unlock;
+
+		*wpan_dev = __cfg802154_wpan_dev_from_attrs(sock_net(skb->sk),
+							    nl802154_fam.attrbuf);
+		if (IS_ERR(*wpan_dev)) {
+			err = PTR_ERR(*wpan_dev);
+			goto out_unlock;
+		}
+		*rdev = wpan_phy_to_rdev((*wpan_dev)->wpan_phy);
+		/* 0 is the first index - add 1 to parse only once */
+		cb->args[0] = (*rdev)->wpan_phy_idx + 1;
+		cb->args[1] = (*wpan_dev)->identifier;
+	} else {
+		/* subtract the 1 again here */
+		struct wpan_phy *wpan_phy = wpan_phy_idx_to_wpan_phy(cb->args[0] - 1);
+		struct wpan_dev *tmp;
+
+		if (!wpan_phy) {
+			err = -ENODEV;
+			goto out_unlock;
+		}
+		*rdev = wpan_phy_to_rdev(wpan_phy);
+		*wpan_dev = NULL;
+
+		list_for_each_entry(tmp, &(*rdev)->wpan_dev_list, list) {
+			if (tmp->identifier == cb->args[1]) {
+				*wpan_dev = tmp;
+				break;
+			}
+		}
+
+		if (!*wpan_dev) {
+			err = -ENODEV;
+			goto out_unlock;
+		}
+	}
+
+	return 0;
+ out_unlock:
+	rtnl_unlock();
+	return err;
+}
+
+static void
+nl802154_finish_wpan_dev_dump(struct cfg802154_registered_device *rdev)
+{
+	rtnl_unlock();
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 /* message building helper */
 static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
 				    int flags, u8 cmd)
@@ -612,6 +690,107 @@
 	       ((u64)wpan_phy_to_rdev(wpan_dev->wpan_phy)->wpan_phy_idx << 32);
 }
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+#include <net/ieee802154_netdev.h>
+
+static int
+ieee802154_llsec_send_key_id(struct sk_buff *msg,
+			     const struct ieee802154_llsec_key_id *desc)
+{
+	struct nlattr *nl_dev_addr;
+
+	if (nla_put_u32(msg, NL802154_KEY_ID_ATTR_MODE, desc->mode))
+		return -ENOBUFS;
+
+	switch (desc->mode) {
+	case NL802154_KEY_ID_MODE_IMPLICIT:
+		nl_dev_addr = nla_nest_start(msg, NL802154_KEY_ID_ATTR_IMPLICIT);
+		if (!nl_dev_addr)
+			return -ENOBUFS;
+
+		if (nla_put_le16(msg, NL802154_DEV_ADDR_ATTR_PAN_ID,
+				 desc->device_addr.pan_id) ||
+		    nla_put_u32(msg,  NL802154_DEV_ADDR_ATTR_MODE,
+				desc->device_addr.mode))
+			return -ENOBUFS;
+
+		switch (desc->device_addr.mode) {
+		case NL802154_DEV_ADDR_SHORT:
+			if (nla_put_le16(msg, NL802154_DEV_ADDR_ATTR_SHORT,
+					 desc->device_addr.short_addr))
+				return -ENOBUFS;
+			break;
+		case NL802154_DEV_ADDR_EXTENDED:
+			if (nla_put_le64(msg, NL802154_DEV_ADDR_ATTR_EXTENDED,
+					 desc->device_addr.extended_addr))
+				return -ENOBUFS;
+			break;
+		default:
+			/* userspace should handle unknown */
+			break;
+		}
+
+		nla_nest_end(msg, nl_dev_addr);
+		break;
+	case NL802154_KEY_ID_MODE_INDEX:
+		break;
+	case NL802154_KEY_ID_MODE_INDEX_SHORT:
+		/* TODO rename short_source? */
+		if (nla_put_le32(msg, NL802154_KEY_ID_ATTR_SOURCE_SHORT,
+				 desc->short_source))
+			return -ENOBUFS;
+		break;
+	case NL802154_KEY_ID_MODE_INDEX_EXTENDED:
+		if (nla_put_le64(msg, NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
+				 desc->extended_source))
+			return -ENOBUFS;
+		break;
+	default:
+		/* userspace should handle unknown */
+		break;
+	}
+
+	/* TODO key_id to key_idx ? Check naming */
+	if (desc->mode != NL802154_KEY_ID_MODE_IMPLICIT) {
+		if (nla_put_u8(msg, NL802154_KEY_ID_ATTR_INDEX, desc->id))
+			return -ENOBUFS;
+	}
+
+	return 0;
+}
+
+static int nl802154_get_llsec_params(struct sk_buff *msg,
+				     struct cfg802154_registered_device *rdev,
+				     struct wpan_dev *wpan_dev)
+{
+	struct nlattr *nl_key_id;
+	struct ieee802154_llsec_params params;
+	int ret;
+
+	ret = rdev_get_llsec_params(rdev, wpan_dev, &params);
+	if (ret < 0)
+		return ret;
+
+	if (nla_put_u8(msg, NL802154_ATTR_SEC_ENABLED, params.enabled) ||
+	    nla_put_u32(msg, NL802154_ATTR_SEC_OUT_LEVEL, params.out_level) ||
+	    nla_put_be32(msg, NL802154_ATTR_SEC_FRAME_COUNTER,
+			 params.frame_counter))
+		return -ENOBUFS;
+
+	nl_key_id = nla_nest_start(msg, NL802154_ATTR_SEC_OUT_KEY_ID);
+	if (!nl_key_id)
+		return -ENOBUFS;
+
+	ret = ieee802154_llsec_send_key_id(msg, &params.out_key);
+	if (ret < 0)
+		return ret;
+
+	nla_nest_end(msg, nl_key_id);
+
+	return 0;
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 static int
 nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
 		    struct cfg802154_registered_device *rdev,
@@ -663,6 +842,11 @@
 	if (nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq))
 		goto nla_put_failure;
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0)
+		goto nla_put_failure;
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 	genlmsg_end(msg, hdr);
 	return 0;
 
@@ -753,10 +937,8 @@
 			return -EINVAL;
 	}
 
-	/* TODO add nla_get_le64 to netlink */
 	if (info->attrs[NL802154_ATTR_EXTENDED_ADDR])
-		extended_addr = (__force __le64)nla_get_u64(
-				info->attrs[NL802154_ATTR_EXTENDED_ADDR]);
+		extended_addr = nla_get_le64(info->attrs[NL802154_ATTR_EXTENDED_ADDR]);
 
 	if (!rdev->ops->add_virtual_intf)
 		return -EOPNOTSUPP;
@@ -1075,6 +1257,838 @@
 	return rdev_set_ackreq_default(rdev, wpan_dev, ackreq);
 }
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+static const struct nla_policy nl802154_dev_addr_policy[NL802154_DEV_ADDR_ATTR_MAX + 1] = {
+	[NL802154_DEV_ADDR_ATTR_PAN_ID] = { .type = NLA_U16 },
+	[NL802154_DEV_ADDR_ATTR_MODE] = { .type = NLA_U32 },
+	[NL802154_DEV_ADDR_ATTR_SHORT] = { .type = NLA_U16 },
+	[NL802154_DEV_ADDR_ATTR_EXTENDED] = { .type = NLA_U64 },
+};
+
+static int
+ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
+				struct ieee802154_addr *addr)
+{
+	struct nlattr *attrs[NL802154_DEV_ADDR_ATTR_MAX + 1];
+
+	if (!nla || nla_parse_nested(attrs, NL802154_DEV_ADDR_ATTR_MAX, nla,
+				     nl802154_dev_addr_policy))
+		return -EINVAL;
+
+	if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] &&
+	    !attrs[NL802154_DEV_ADDR_ATTR_MODE] &&
+	    !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
+	      attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
+		return -EINVAL;
+
+	addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]);
+	addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]);
+	switch (addr->mode) {
+	case NL802154_DEV_ADDR_SHORT:
+		addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]);
+		break;
+	case NL802154_DEV_ADDR_EXTENDED:
+		addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct nla_policy nl802154_key_id_policy[NL802154_KEY_ID_ATTR_MAX + 1] = {
+	[NL802154_KEY_ID_ATTR_MODE] = { .type = NLA_U32 },
+	[NL802154_KEY_ID_ATTR_INDEX] = { .type = NLA_U8 },
+	[NL802154_KEY_ID_ATTR_IMPLICIT] = { .type = NLA_NESTED },
+	[NL802154_KEY_ID_ATTR_SOURCE_SHORT] = { .type = NLA_U32 },
+	[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED] = { .type = NLA_U64 },
+};
+
+static int
+ieee802154_llsec_parse_key_id(struct nlattr *nla,
+			      struct ieee802154_llsec_key_id *desc)
+{
+	struct nlattr *attrs[NL802154_KEY_ID_ATTR_MAX + 1];
+
+	if (!nla || nla_parse_nested(attrs, NL802154_KEY_ID_ATTR_MAX, nla,
+				     nl802154_key_id_policy))
+		return -EINVAL;
+
+	if (!attrs[NL802154_KEY_ID_ATTR_MODE])
+		return -EINVAL;
+
+	desc->mode = nla_get_u32(attrs[NL802154_KEY_ID_ATTR_MODE]);
+	switch (desc->mode) {
+	case NL802154_KEY_ID_MODE_IMPLICIT:
+		if (!attrs[NL802154_KEY_ID_ATTR_IMPLICIT])
+			return -EINVAL;
+
+		if (ieee802154_llsec_parse_dev_addr(attrs[NL802154_KEY_ID_ATTR_IMPLICIT],
+						    &desc->device_addr) < 0)
+			return -EINVAL;
+		break;
+	case NL802154_KEY_ID_MODE_INDEX:
+		break;
+	case NL802154_KEY_ID_MODE_INDEX_SHORT:
+		if (!attrs[NL802154_KEY_ID_ATTR_SOURCE_SHORT])
+			return -EINVAL;
+
+		desc->short_source = nla_get_le32(attrs[NL802154_KEY_ID_ATTR_SOURCE_SHORT]);
+		break;
+	case NL802154_KEY_ID_MODE_INDEX_EXTENDED:
+		if (!attrs[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED])
+			return -EINVAL;
+
+		desc->extended_source = nla_get_le64(attrs[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED]);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (desc->mode != NL802154_KEY_ID_MODE_IMPLICIT) {
+		if (!attrs[NL802154_KEY_ID_ATTR_INDEX])
+			return -EINVAL;
+
+		/* TODO change id to idx */
+		desc->id = nla_get_u8(attrs[NL802154_KEY_ID_ATTR_INDEX]);
+	}
+
+	return 0;
+}
+
+static int nl802154_set_llsec_params(struct sk_buff *skb,
+				     struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct ieee802154_llsec_params params;
+	u32 changed = 0;
+	int ret;
+
+	if (info->attrs[NL802154_ATTR_SEC_ENABLED]) {
+		u8 enabled;
+
+		enabled = nla_get_u8(info->attrs[NL802154_ATTR_SEC_ENABLED]);
+		if (enabled != 0 && enabled != 1)
+			return -EINVAL;
+
+		params.enabled = nla_get_u8(info->attrs[NL802154_ATTR_SEC_ENABLED]);
+		changed |= IEEE802154_LLSEC_PARAM_ENABLED;
+	}
+
+	if (info->attrs[NL802154_ATTR_SEC_OUT_KEY_ID]) {
+		ret = ieee802154_llsec_parse_key_id(info->attrs[NL802154_ATTR_SEC_OUT_KEY_ID],
+						    &params.out_key);
+		if (ret < 0)
+			return ret;
+
+		changed |= IEEE802154_LLSEC_PARAM_OUT_KEY;
+	}
+
+	if (info->attrs[NL802154_ATTR_SEC_OUT_LEVEL]) {
+		params.out_level = nla_get_u32(info->attrs[NL802154_ATTR_SEC_OUT_LEVEL]);
+		if (params.out_level > NL802154_SECLEVEL_MAX)
+			return -EINVAL;
+
+		changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL;
+	}
+
+	if (info->attrs[NL802154_ATTR_SEC_FRAME_COUNTER]) {
+		params.frame_counter = nla_get_be32(info->attrs[NL802154_ATTR_SEC_FRAME_COUNTER]);
+		changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER;
+	}
+
+	return rdev_set_llsec_params(rdev, wpan_dev, &params, changed);
+}
+
+static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid,
+			     u32 seq, int flags,
+			     struct cfg802154_registered_device *rdev,
+			     struct net_device *dev,
+			     const struct ieee802154_llsec_key_entry *key)
+{
+	void *hdr;
+	u32 commands[NL802154_CMD_FRAME_NR_IDS / 32];
+	struct nlattr *nl_key, *nl_key_id;
+
+	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+	if (!hdr)
+		return -1;
+
+	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	nl_key = nla_nest_start(msg, NL802154_ATTR_SEC_KEY);
+	if (!nl_key)
+		goto nla_put_failure;
+
+	nl_key_id = nla_nest_start(msg, NL802154_KEY_ATTR_ID);
+	if (!nl_key_id)
+		goto nla_put_failure;
+
+	if (ieee802154_llsec_send_key_id(msg, &key->id) < 0)
+		goto nla_put_failure;
+
+	nla_nest_end(msg, nl_key_id);
+
+	if (nla_put_u8(msg, NL802154_KEY_ATTR_USAGE_FRAMES,
+		       key->key->frame_types))
+		goto nla_put_failure;
+
+	if (key->key->frame_types & BIT(NL802154_FRAME_CMD)) {
+		/* TODO for each nested */
+		memset(commands, 0, sizeof(commands));
+		commands[7] = key->key->cmd_frame_ids;
+		if (nla_put(msg, NL802154_KEY_ATTR_USAGE_CMDS,
+			    sizeof(commands), commands))
+			goto nla_put_failure;
+	}
+
+	if (nla_put(msg, NL802154_KEY_ATTR_BYTES, NL802154_KEY_SIZE,
+		    key->key->key))
+		goto nla_put_failure;
+
+	nla_nest_end(msg, nl_key);
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct cfg802154_registered_device *rdev = NULL;
+	struct ieee802154_llsec_key_entry *key;
+	struct ieee802154_llsec_table *table;
+	struct wpan_dev *wpan_dev;
+	int err;
+
+	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+	if (err)
+		return err;
+
+	if (!wpan_dev->netdev) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	rdev_lock_llsec_table(rdev, wpan_dev);
+	rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+	/* TODO make it like station dump */
+	if (cb->args[2])
+		goto out;
+
+	list_for_each_entry(key, &table->keys, list) {
+		if (nl802154_send_key(skb, NL802154_CMD_NEW_SEC_KEY,
+				      NETLINK_CB(cb->skb).portid,
+				      cb->nlh->nlmsg_seq, NLM_F_MULTI,
+				      rdev, wpan_dev->netdev, key) < 0) {
+			/* TODO */
+			err = -EIO;
+			rdev_unlock_llsec_table(rdev, wpan_dev);
+			goto out_err;
+		}
+	}
+
+	cb->args[2] = 1;
+
+out:
+	rdev_unlock_llsec_table(rdev, wpan_dev);
+	err = skb->len;
+out_err:
+	nl802154_finish_wpan_dev_dump(rdev);
+
+	return err;
+}
+
+static const struct nla_policy nl802154_key_policy[NL802154_KEY_ATTR_MAX + 1] = {
+	[NL802154_KEY_ATTR_ID] = { NLA_NESTED },
+	/* TODO handle it as for_each_nested and NLA_FLAG? */
+	[NL802154_KEY_ATTR_USAGE_FRAMES] = { NLA_U8 },
+	/* TODO handle it as for_each_nested, not static array? */
+	[NL802154_KEY_ATTR_USAGE_CMDS] = { .len = NL802154_CMD_FRAME_NR_IDS / 8 },
+	[NL802154_KEY_ATTR_BYTES] = { .len = NL802154_KEY_SIZE },
+};
+
+static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
+	struct ieee802154_llsec_key key = { };
+	struct ieee802154_llsec_key_id id = { };
+	u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
+
+	if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_KEY],
+			     nl802154_key_policy))
+		return -EINVAL;
+
+	if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] ||
+	    !attrs[NL802154_KEY_ATTR_BYTES])
+		return -EINVAL;
+
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
+		return -ENOBUFS;
+
+	key.frame_types = nla_get_u8(attrs[NL802154_KEY_ATTR_USAGE_FRAMES]);
+	if (key.frame_types > BIT(NL802154_FRAME_MAX) ||
+	    ((key.frame_types & BIT(NL802154_FRAME_CMD)) &&
+	     !attrs[NL802154_KEY_ATTR_USAGE_CMDS]))
+		return -EINVAL;
+
+	if (attrs[NL802154_KEY_ATTR_USAGE_CMDS]) {
+		/* TODO for each nested */
+		nla_memcpy(commands, attrs[NL802154_KEY_ATTR_USAGE_CMDS],
+			   NL802154_CMD_FRAME_NR_IDS / 8);
+
+		/* TODO understand the -EINVAL logic here? last condition */
+		if (commands[0] || commands[1] || commands[2] || commands[3] ||
+		    commands[4] || commands[5] || commands[6] ||
+		    commands[7] > BIT(NL802154_CMD_FRAME_MAX))
+			return -EINVAL;
+
+		key.cmd_frame_ids = commands[7];
+	} else {
+		key.cmd_frame_ids = 0;
+	}
+
+	nla_memcpy(key.key, attrs[NL802154_KEY_ATTR_BYTES], NL802154_KEY_SIZE);
+
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
+		return -ENOBUFS;
+
+	return rdev_add_llsec_key(rdev, wpan_dev, &id, &key);
+}
+
+static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
+	struct ieee802154_llsec_key_id id;
+
+	if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_KEY],
+			     nl802154_key_policy))
+		return -EINVAL;
+
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
+		return -ENOBUFS;
+
+	return rdev_del_llsec_key(rdev, wpan_dev, &id);
+}
+
+static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid,
+				u32 seq, int flags,
+				struct cfg802154_registered_device *rdev,
+				struct net_device *dev,
+				const struct ieee802154_llsec_device *dev_desc)
+{
+	void *hdr;
+	struct nlattr *nl_device;
+
+	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+	if (!hdr)
+		return -1;
+
+	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	nl_device = nla_nest_start(msg, NL802154_ATTR_SEC_DEVICE);
+	if (!nl_device)
+		goto nla_put_failure;
+
+	if (nla_put_u32(msg, NL802154_DEV_ATTR_FRAME_COUNTER,
+			dev_desc->frame_counter) ||
+	    nla_put_le16(msg, NL802154_DEV_ATTR_PAN_ID, dev_desc->pan_id) ||
+	    nla_put_le16(msg, NL802154_DEV_ATTR_SHORT_ADDR,
+			 dev_desc->short_addr) ||
+	    nla_put_le64(msg, NL802154_DEV_ATTR_EXTENDED_ADDR,
+			 dev_desc->hwaddr) ||
+	    nla_put_u8(msg, NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
+		       dev_desc->seclevel_exempt) ||
+	    nla_put_u32(msg, NL802154_DEV_ATTR_KEY_MODE, dev_desc->key_mode))
+		goto nla_put_failure;
+
+	nla_nest_end(msg, nl_device);
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct cfg802154_registered_device *rdev = NULL;
+	struct ieee802154_llsec_device *dev;
+	struct ieee802154_llsec_table *table;
+	struct wpan_dev *wpan_dev;
+	int err;
+
+	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+	if (err)
+		return err;
+
+	if (!wpan_dev->netdev) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	rdev_lock_llsec_table(rdev, wpan_dev);
+	rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+	/* TODO make it like station dump */
+	if (cb->args[2])
+		goto out;
+
+	list_for_each_entry(dev, &table->devices, list) {
+		if (nl802154_send_device(skb, NL802154_CMD_NEW_SEC_LEVEL,
+					 NETLINK_CB(cb->skb).portid,
+					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					 rdev, wpan_dev->netdev, dev) < 0) {
+			/* TODO */
+			err = -EIO;
+			rdev_unlock_llsec_table(rdev, wpan_dev);
+			goto out_err;
+		}
+	}
+
+	cb->args[2] = 1;
+
+out:
+	rdev_unlock_llsec_table(rdev, wpan_dev);
+	err = skb->len;
+out_err:
+	nl802154_finish_wpan_dev_dump(rdev);
+
+	return err;
+}
+
+static const struct nla_policy nl802154_dev_policy[NL802154_DEV_ATTR_MAX + 1] = {
+	[NL802154_DEV_ATTR_FRAME_COUNTER] = { NLA_U32 },
+	[NL802154_DEV_ATTR_PAN_ID] = { .type = NLA_U16 },
+	[NL802154_DEV_ATTR_SHORT_ADDR] = { .type = NLA_U16 },
+	[NL802154_DEV_ATTR_EXTENDED_ADDR] = { .type = NLA_U64 },
+	[NL802154_DEV_ATTR_SECLEVEL_EXEMPT] = { NLA_U8 },
+	[NL802154_DEV_ATTR_KEY_MODE] = { NLA_U32 },
+};
+
+static int
+ieee802154_llsec_parse_device(struct nlattr *nla,
+			      struct ieee802154_llsec_device *dev)
+{
+	struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
+
+	if (!nla || nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX, nla,
+				     nl802154_dev_policy))
+		return -EINVAL;
+
+	memset(dev, 0, sizeof(*dev));
+
+	if (!attrs[NL802154_DEV_ATTR_FRAME_COUNTER] ||
+	    !attrs[NL802154_DEV_ATTR_PAN_ID] ||
+	    !attrs[NL802154_DEV_ATTR_SHORT_ADDR] ||
+	    !attrs[NL802154_DEV_ATTR_EXTENDED_ADDR] ||
+	    !attrs[NL802154_DEV_ATTR_SECLEVEL_EXEMPT] ||
+	    !attrs[NL802154_DEV_ATTR_KEY_MODE])
+		return -EINVAL;
+
+	/* TODO be32 */
+	dev->frame_counter = nla_get_u32(attrs[NL802154_DEV_ATTR_FRAME_COUNTER]);
+	dev->pan_id = nla_get_le16(attrs[NL802154_DEV_ATTR_PAN_ID]);
+	dev->short_addr = nla_get_le16(attrs[NL802154_DEV_ATTR_SHORT_ADDR]);
+	/* TODO rename hwaddr to extended_addr */
+	dev->hwaddr = nla_get_le64(attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]);
+	dev->seclevel_exempt = nla_get_u8(attrs[NL802154_DEV_ATTR_SECLEVEL_EXEMPT]);
+	dev->key_mode = nla_get_u32(attrs[NL802154_DEV_ATTR_KEY_MODE]);
+
+	if (dev->key_mode > NL802154_DEVKEY_MAX ||
+	    (dev->seclevel_exempt != 0 && dev->seclevel_exempt != 1))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct ieee802154_llsec_device dev_desc;
+
+	if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
+					  &dev_desc) < 0)
+		return -EINVAL;
+
+	return rdev_add_device(rdev, wpan_dev, &dev_desc);
+}
+
+static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
+	__le64 extended_addr;
+
+	if (nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_DEVICE],
+			     nl802154_dev_policy))
+		return -EINVAL;
+
+	if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR])
+		return -EINVAL;
+
+	extended_addr = nla_get_le64(attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]);
+	return rdev_del_device(rdev, wpan_dev, extended_addr);
+}
+
+static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid,
+				u32 seq, int flags,
+				struct cfg802154_registered_device *rdev,
+				struct net_device *dev, __le64 extended_addr,
+				const struct ieee802154_llsec_device_key *devkey)
+{
+	void *hdr;
+	struct nlattr *nl_devkey, *nl_key_id;
+
+	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+	if (!hdr)
+		return -1;
+
+	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	nl_devkey = nla_nest_start(msg, NL802154_ATTR_SEC_DEVKEY);
+	if (!nl_devkey)
+		goto nla_put_failure;
+
+	if (nla_put_le64(msg, NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
+			 extended_addr) ||
+	    nla_put_u32(msg, NL802154_DEVKEY_ATTR_FRAME_COUNTER,
+			devkey->frame_counter))
+		goto nla_put_failure;
+
+	nl_key_id = nla_nest_start(msg, NL802154_DEVKEY_ATTR_ID);
+	if (!nl_key_id)
+		goto nla_put_failure;
+
+	if (ieee802154_llsec_send_key_id(msg, &devkey->key_id) < 0)
+		goto nla_put_failure;
+
+	nla_nest_end(msg, nl_key_id);
+	nla_nest_end(msg, nl_devkey);
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct cfg802154_registered_device *rdev = NULL;
+	struct ieee802154_llsec_device_key *kpos;
+	struct ieee802154_llsec_device *dpos;
+	struct ieee802154_llsec_table *table;
+	struct wpan_dev *wpan_dev;
+	int err;
+
+	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+	if (err)
+		return err;
+
+	if (!wpan_dev->netdev) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	rdev_lock_llsec_table(rdev, wpan_dev);
+	rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+	/* TODO make it like station dump */
+	if (cb->args[2])
+		goto out;
+
+	/* TODO look into removing devkey and using a nested attribute */
+	list_for_each_entry(dpos, &table->devices, list) {
+		list_for_each_entry(kpos, &dpos->keys, list) {
+			if (nl802154_send_devkey(skb,
+						 NL802154_CMD_NEW_SEC_LEVEL,
+						 NETLINK_CB(cb->skb).portid,
+						 cb->nlh->nlmsg_seq,
+						 NLM_F_MULTI, rdev,
+						 wpan_dev->netdev,
+						 dpos->hwaddr,
+						 kpos) < 0) {
+				/* TODO */
+				err = -EIO;
+				rdev_unlock_llsec_table(rdev, wpan_dev);
+				goto out_err;
+			}
+		}
+	}
+
+	cb->args[2] = 1;
+
+out:
+	rdev_unlock_llsec_table(rdev, wpan_dev);
+	err = skb->len;
+out_err:
+	nl802154_finish_wpan_dev_dump(rdev);
+
+	return err;
+}
+
+static const struct nla_policy nl802154_devkey_policy[NL802154_DEVKEY_ATTR_MAX + 1] = {
+	[NL802154_DEVKEY_ATTR_FRAME_COUNTER] = { NLA_U32 },
+	[NL802154_DEVKEY_ATTR_EXTENDED_ADDR] = { NLA_U64 },
+	[NL802154_DEVKEY_ATTR_ID] = { NLA_NESTED },
+};
+
+static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_DEVKEY_ATTR_MAX + 1];
+	struct ieee802154_llsec_device_key key;
+	__le64 extended_addr;
+
+	if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
+	    nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_DEVKEY],
+			     nl802154_devkey_policy) < 0)
+		return -EINVAL;
+
+	if (!attrs[NL802154_DEVKEY_ATTR_FRAME_COUNTER] ||
+	    !attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
+		return -EINVAL;
+
+	/* TODO change key.id ? */
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_DEVKEY_ATTR_ID],
+					  &key.key_id) < 0)
+		return -ENOBUFS;
+
+	/* TODO be32 */
+	key.frame_counter = nla_get_u32(attrs[NL802154_DEVKEY_ATTR_FRAME_COUNTER]);
+	/* TODO change naming hwaddr -> extended_addr
+	 * check unique identifier short+pan OR extended_addr
+	 */
+	extended_addr = nla_get_le64(attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]);
+	return rdev_add_devkey(rdev, wpan_dev, extended_addr, &key);
+}
+
+static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_DEVKEY_ATTR_MAX + 1];
+	struct ieee802154_llsec_device_key key;
+	__le64 extended_addr;
+
+	if (nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_DEVKEY],
+			     nl802154_devkey_policy))
+		return -EINVAL;
+
+	if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
+		return -EINVAL;
+
+	/* TODO change key.id ? */
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_DEVKEY_ATTR_ID],
+					  &key.key_id) < 0)
+		return -ENOBUFS;
+
+	/* TODO change naming hwaddr -> extended_addr
+	 * check unique identifier short+pan OR extended_addr
+	 */
+	extended_addr = nla_get_le64(attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]);
+	return rdev_del_devkey(rdev, wpan_dev, extended_addr, &key);
+}
+
+static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid,
+				  u32 seq, int flags,
+				  struct cfg802154_registered_device *rdev,
+				  struct net_device *dev,
+				  const struct ieee802154_llsec_seclevel *sl)
+{
+	void *hdr;
+	struct nlattr *nl_seclevel;
+
+	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+	if (!hdr)
+		return -1;
+
+	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	nl_seclevel = nla_nest_start(msg, NL802154_ATTR_SEC_LEVEL);
+	if (!nl_seclevel)
+		goto nla_put_failure;
+
+	if (nla_put_u32(msg, NL802154_SECLEVEL_ATTR_FRAME, sl->frame_type) ||
+	    nla_put_u32(msg, NL802154_SECLEVEL_ATTR_LEVELS, sl->sec_levels) ||
+	    nla_put_u8(msg, NL802154_SECLEVEL_ATTR_DEV_OVERRIDE,
+		       sl->device_override))
+		goto nla_put_failure;
+
+	if (sl->frame_type == NL802154_FRAME_CMD) {
+		if (nla_put_u32(msg, NL802154_SECLEVEL_ATTR_CMD_FRAME,
+				sl->cmd_frame_id))
+			goto nla_put_failure;
+	}
+
+	nla_nest_end(msg, nl_seclevel);
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct cfg802154_registered_device *rdev = NULL;
+	struct ieee802154_llsec_seclevel *sl;
+	struct ieee802154_llsec_table *table;
+	struct wpan_dev *wpan_dev;
+	int err;
+
+	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+	if (err)
+		return err;
+
+	if (!wpan_dev->netdev) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	rdev_lock_llsec_table(rdev, wpan_dev);
+	rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+	/* TODO make it like station dump */
+	if (cb->args[2])
+		goto out;
+
+	list_for_each_entry(sl, &table->security_levels, list) {
+		if (nl802154_send_seclevel(skb, NL802154_CMD_NEW_SEC_LEVEL,
+					   NETLINK_CB(cb->skb).portid,
+					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					   rdev, wpan_dev->netdev, sl) < 0) {
+			/* TODO */
+			err = -EIO;
+			rdev_unlock_llsec_table(rdev, wpan_dev);
+			goto out_err;
+		}
+	}
+
+	cb->args[2] = 1;
+
+out:
+	rdev_unlock_llsec_table(rdev, wpan_dev);
+	err = skb->len;
+out_err:
+	nl802154_finish_wpan_dev_dump(rdev);
+
+	return err;
+}
+
+static const struct nla_policy nl802154_seclevel_policy[NL802154_SECLEVEL_ATTR_MAX + 1] = {
+	[NL802154_SECLEVEL_ATTR_LEVELS] = { .type = NLA_U8 },
+	[NL802154_SECLEVEL_ATTR_FRAME] = { .type = NLA_U32 },
+	[NL802154_SECLEVEL_ATTR_CMD_FRAME] = { .type = NLA_U32 },
+	[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE] = { .type = NLA_U8 },
+};
+
+static int
+llsec_parse_seclevel(struct nlattr *nla, struct ieee802154_llsec_seclevel *sl)
+{
+	struct nlattr *attrs[NL802154_SECLEVEL_ATTR_MAX + 1];
+
+	if (!nla || nla_parse_nested(attrs, NL802154_SECLEVEL_ATTR_MAX, nla,
+				     nl802154_seclevel_policy))
+		return -EINVAL;
+
+	memset(sl, 0, sizeof(*sl));
+
+	if (!attrs[NL802154_SECLEVEL_ATTR_LEVELS] ||
+	    !attrs[NL802154_SECLEVEL_ATTR_FRAME] ||
+	    !attrs[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE])
+		return -EINVAL;
+
+	sl->sec_levels = nla_get_u8(attrs[NL802154_SECLEVEL_ATTR_LEVELS]);
+	sl->frame_type = nla_get_u32(attrs[NL802154_SECLEVEL_ATTR_FRAME]);
+	sl->device_override = nla_get_u8(attrs[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE]);
+	if (sl->frame_type > NL802154_FRAME_MAX ||
+	    (sl->device_override != 0 && sl->device_override != 1))
+		return -EINVAL;
+
+	if (sl->frame_type == NL802154_FRAME_CMD) {
+		if (!attrs[NL802154_SECLEVEL_ATTR_CMD_FRAME])
+			return -EINVAL;
+
+		sl->cmd_frame_id = nla_get_u32(attrs[NL802154_SECLEVEL_ATTR_CMD_FRAME]);
+		if (sl->cmd_frame_id > NL802154_CMD_FRAME_MAX)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
+				       struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct ieee802154_llsec_seclevel sl;
+
+	if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
+				 &sl) < 0)
+		return -EINVAL;
+
+	return rdev_add_seclevel(rdev, wpan_dev, &sl);
+}
+
+static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
+				       struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct ieee802154_llsec_seclevel sl;
+
+	if (!info->attrs[NL802154_ATTR_SEC_LEVEL] ||
+	    llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
+				 &sl) < 0)
+		return -EINVAL;
+
+	return rdev_del_seclevel(rdev, wpan_dev, &sl);
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 #define NL802154_FLAG_NEED_WPAN_PHY	0x01
 #define NL802154_FLAG_NEED_NETDEV	0x02
 #define NL802154_FLAG_NEED_RTNL		0x04
@@ -1289,6 +2303,119 @@
 		.internal_flags = NL802154_FLAG_NEED_NETDEV |
 				  NL802154_FLAG_NEED_RTNL,
 	},
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	{
+		.cmd = NL802154_CMD_SET_SEC_PARAMS,
+		.doit = nl802154_set_llsec_params,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_GET_SEC_KEY,
+		/* TODO .doit by matching key id? */
+		.dumpit = nl802154_dump_llsec_key,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_NEW_SEC_KEY,
+		.doit = nl802154_add_llsec_key,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_DEL_SEC_KEY,
+		.doit = nl802154_del_llsec_key,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	/* TODO unique identifier must be short+pan OR extended_addr */
+	{
+		.cmd = NL802154_CMD_GET_SEC_DEV,
+		/* TODO .doit by matching extended_addr? */
+		.dumpit = nl802154_dump_llsec_dev,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_NEW_SEC_DEV,
+		.doit = nl802154_add_llsec_dev,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_DEL_SEC_DEV,
+		.doit = nl802154_del_llsec_dev,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	/* TODO remove complete devkey, put it as nested? */
+	{
+		.cmd = NL802154_CMD_GET_SEC_DEVKEY,
+		/* TODO doit by matching ??? */
+		.dumpit = nl802154_dump_llsec_devkey,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_NEW_SEC_DEVKEY,
+		.doit = nl802154_add_llsec_devkey,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_DEL_SEC_DEVKEY,
+		.doit = nl802154_del_llsec_devkey,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_GET_SEC_LEVEL,
+		/* TODO .doit by matching frame_type? */
+		.dumpit = nl802154_dump_llsec_seclevel,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_NEW_SEC_LEVEL,
+		.doit = nl802154_add_llsec_seclevel,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_DEL_SEC_LEVEL,
+		/* TODO match frame_type only? */
+		.doit = nl802154_del_llsec_seclevel,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 };
 
 /* initialisation/exit functions */
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
index 03b3575..4441c63 100644
--- a/net/ieee802154/rdev-ops.h
+++ b/net/ieee802154/rdev-ops.h
@@ -208,4 +208,113 @@
 	return ret;
 }
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+/* TODO this is already a nl802154, so move into ieee802154 */
+static inline void
+rdev_get_llsec_table(struct cfg802154_registered_device *rdev,
+		     struct wpan_dev *wpan_dev,
+		     struct ieee802154_llsec_table **table)
+{
+	rdev->ops->get_llsec_table(&rdev->wpan_phy, wpan_dev, table);
+}
+
+static inline void
+rdev_lock_llsec_table(struct cfg802154_registered_device *rdev,
+		      struct wpan_dev *wpan_dev)
+{
+	rdev->ops->lock_llsec_table(&rdev->wpan_phy, wpan_dev);
+}
+
+static inline void
+rdev_unlock_llsec_table(struct cfg802154_registered_device *rdev,
+			struct wpan_dev *wpan_dev)
+{
+	rdev->ops->unlock_llsec_table(&rdev->wpan_phy, wpan_dev);
+}
+
+static inline int
+rdev_get_llsec_params(struct cfg802154_registered_device *rdev,
+		      struct wpan_dev *wpan_dev,
+		      struct ieee802154_llsec_params *params)
+{
+	return rdev->ops->get_llsec_params(&rdev->wpan_phy, wpan_dev, params);
+}
+
+static inline int
+rdev_set_llsec_params(struct cfg802154_registered_device *rdev,
+		      struct wpan_dev *wpan_dev,
+		      const struct ieee802154_llsec_params *params,
+		      u32 changed)
+{
+	return rdev->ops->set_llsec_params(&rdev->wpan_phy, wpan_dev, params,
+					   changed);
+}
+
+static inline int
+rdev_add_llsec_key(struct cfg802154_registered_device *rdev,
+		   struct wpan_dev *wpan_dev,
+		   const struct ieee802154_llsec_key_id *id,
+		   const struct ieee802154_llsec_key *key)
+{
+	return rdev->ops->add_llsec_key(&rdev->wpan_phy, wpan_dev, id, key);
+}
+
+static inline int
+rdev_del_llsec_key(struct cfg802154_registered_device *rdev,
+		   struct wpan_dev *wpan_dev,
+		   const struct ieee802154_llsec_key_id *id)
+{
+	return rdev->ops->del_llsec_key(&rdev->wpan_phy, wpan_dev, id);
+}
+
+static inline int
+rdev_add_seclevel(struct cfg802154_registered_device *rdev,
+		  struct wpan_dev *wpan_dev,
+		  const struct ieee802154_llsec_seclevel *sl)
+{
+	return rdev->ops->add_seclevel(&rdev->wpan_phy, wpan_dev, sl);
+}
+
+static inline int
+rdev_del_seclevel(struct cfg802154_registered_device *rdev,
+		  struct wpan_dev *wpan_dev,
+		  const struct ieee802154_llsec_seclevel *sl)
+{
+	return rdev->ops->del_seclevel(&rdev->wpan_phy, wpan_dev, sl);
+}
+
+static inline int
+rdev_add_device(struct cfg802154_registered_device *rdev,
+		struct wpan_dev *wpan_dev,
+		const struct ieee802154_llsec_device *dev_desc)
+{
+	return rdev->ops->add_device(&rdev->wpan_phy, wpan_dev, dev_desc);
+}
+
+static inline int
+rdev_del_device(struct cfg802154_registered_device *rdev,
+		struct wpan_dev *wpan_dev, __le64 extended_addr)
+{
+	return rdev->ops->del_device(&rdev->wpan_phy, wpan_dev, extended_addr);
+}
+
+static inline int
+rdev_add_devkey(struct cfg802154_registered_device *rdev,
+		struct wpan_dev *wpan_dev, __le64 extended_addr,
+		const struct ieee802154_llsec_device_key *devkey)
+{
+	return rdev->ops->add_devkey(&rdev->wpan_phy, wpan_dev, extended_addr,
+				     devkey);
+}
+
+static inline int
+rdev_del_devkey(struct cfg802154_registered_device *rdev,
+		struct wpan_dev *wpan_dev, __le64 extended_addr,
+		const struct ieee802154_llsec_device_key *devkey)
+{
+	return rdev->ops->del_devkey(&rdev->wpan_phy, wpan_dev, extended_addr,
+				     devkey);
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 #endif /* __CFG802154_RDEV_OPS */
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index b6eacf3..a548be2 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -273,7 +273,7 @@
 		goto out;
 	}
 
-	mtu = dev->mtu;
+	mtu = IEEE802154_MTU;
 	pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
 	if (size > mtu) {
@@ -637,7 +637,7 @@
 		err = -ENXIO;
 		goto out;
 	}
-	mtu = dev->mtu;
+	mtu = IEEE802154_MTU;
 	pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
 	if (size > mtu) {
@@ -676,8 +676,8 @@
 	cb->seclevel = ro->seclevel;
 	cb->seclevel_override = ro->seclevel_override;
 
-	err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
-			      ro->bound ? &ro->src_addr : NULL, size);
+	err = wpan_dev_hard_header(skb, dev, &dst_addr,
+				   ro->bound ? &ro->src_addr : NULL, size);
 	if (err < 0)
 		goto out_skb;
 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8a55664..11c4ca1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -119,7 +119,7 @@
 #ifdef CONFIG_IP_MROUTE
 #include <linux/mroute.h>
 #endif
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 
 
 /* The inetsw table contains everything that inet_create needs to
@@ -219,17 +219,13 @@
 		 * shutdown() (rather than close()).
 		 */
 		if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
-		    !inet_csk(sk)->icsk_accept_queue.fastopenq) {
+		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
 			if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
-				err = fastopen_init_queue(sk, backlog);
+				fastopen_queue_tune(sk, backlog);
 			else if ((sysctl_tcp_fastopen &
 				  TFO_SERVER_WO_SOCKOPT2) != 0)
-				err = fastopen_init_queue(sk,
+				fastopen_queue_tune(sk,
 				    ((uint)sysctl_tcp_fastopen) >> 16);
-			else
-				err = 0;
-			if (err)
-				goto out;
 
 			tcp_fastopen_init_key_once(true);
 		}
@@ -450,7 +446,7 @@
 			goto out;
 	}
 
-	tb_id = vrf_dev_table_ifindex(net, sk->sk_bound_dev_if) ? : tb_id;
+	tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
 	chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
 
 	/* Not specified by any standard per-se, however it breaks too
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 61ff5ea..01308e6 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -113,6 +113,8 @@
 #include <net/arp.h>
 #include <net/ax25.h>
 #include <net/netrom.h>
+#include <net/dst_metadata.h>
+#include <net/ip_tunnels.h>
 
 #include <linux/uaccess.h>
 
@@ -296,7 +298,8 @@
 			 struct net_device *dev, __be32 src_ip,
 			 const unsigned char *dest_hw,
 			 const unsigned char *src_hw,
-			 const unsigned char *target_hw, struct sk_buff *oskb)
+			 const unsigned char *target_hw,
+			 struct dst_entry *dst)
 {
 	struct sk_buff *skb;
 
@@ -309,9 +312,7 @@
 	if (!skb)
 		return;
 
-	if (oskb)
-		skb_dst_copy(skb, oskb);
-
+	skb_dst_set(skb, dst);
 	arp_xmit(skb);
 }
 
@@ -333,6 +334,7 @@
 	__be32 target = *(__be32 *)neigh->primary_key;
 	int probes = atomic_read(&neigh->probes);
 	struct in_device *in_dev;
+	struct dst_entry *dst = NULL;
 
 	rcu_read_lock();
 	in_dev = __in_dev_get_rcu(dev);
@@ -381,9 +383,10 @@
 		}
 	}
 
+	if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE))
+		dst = dst_clone(skb_dst(skb));
 	arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
-		     dst_hw, dev->dev_addr, NULL,
-		     dev->priv_flags & IFF_XMIT_DST_RELEASE ? NULL : skb);
+		     dst_hw, dev->dev_addr, NULL, dst);
 }
 
 static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
@@ -654,6 +657,7 @@
 	u16 dev_type = dev->type;
 	int addr_type;
 	struct neighbour *n;
+	struct dst_entry *reply_dst = NULL;
 	bool is_garp = false;
 
 	/* arp_rcv below verifies the ARP header and verifies the device
@@ -754,13 +758,18 @@
  *  cache.
  */
 
+	if (arp->ar_op == htons(ARPOP_REQUEST) && skb_metadata_dst(skb))
+		reply_dst = (struct dst_entry *)
+			    iptunnel_metadata_reply(skb_metadata_dst(skb),
+						    GFP_ATOMIC);
+
 	/* Special case: IPv4 duplicate address detection packet (RFC2131) */
 	if (sip == 0) {
 		if (arp->ar_op == htons(ARPOP_REQUEST) &&
 		    inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL &&
 		    !arp_ignore(in_dev, sip, tip))
-			arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
-				 dev->dev_addr, sha);
+			arp_send_dst(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip,
+				     sha, dev->dev_addr, sha, reply_dst);
 		goto out;
 	}
 
@@ -779,9 +788,10 @@
 			if (!dont_send) {
 				n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
 				if (n) {
-					arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
-						 dev, tip, sha, dev->dev_addr,
-						 sha);
+					arp_send_dst(ARPOP_REPLY, ETH_P_ARP,
+						     sip, dev, tip, sha,
+						     dev->dev_addr, sha,
+						     reply_dst);
 					neigh_release(n);
 				}
 			}
@@ -799,9 +809,10 @@
 				if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
 				    skb->pkt_type == PACKET_HOST ||
 				    NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY) == 0) {
-					arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
-						 dev, tip, sha, dev->dev_addr,
-						 sha);
+					arp_send_dst(ARPOP_REPLY, ETH_P_ARP,
+						     sip, dev, tip, sha,
+						     dev->dev_addr, sha,
+						     reply_dst);
 				} else {
 					pneigh_enqueue(&arp_tbl,
 						       in_dev->arp_parms, skb);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 6fcbd21..d7c2bb0 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -45,7 +45,7 @@
 #include <net/ip_fib.h>
 #include <net/rtnetlink.h>
 #include <net/xfrm.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 #include <trace/events/fib.h>
 
 #ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -255,7 +255,7 @@
 unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
 				__be32 addr)
 {
-	u32 rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL;
+	u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
 
 	return __inet_dev_addr_type(net, dev, addr, rt_table);
 }
@@ -268,7 +268,7 @@
 				      const struct net_device *dev,
 				      __be32 addr)
 {
-	u32 rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL;
+	u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
 
 	return __inet_dev_addr_type(net, NULL, addr, rt_table);
 }
@@ -332,7 +332,7 @@
 	bool dev_match;
 
 	fl4.flowi4_oif = 0;
-	fl4.flowi4_iif = vrf_master_ifindex_rcu(dev);
+	fl4.flowi4_iif = l3mdev_master_ifindex_rcu(dev);
 	if (!fl4.flowi4_iif)
 		fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
 	fl4.daddr = src;
@@ -340,6 +340,7 @@
 	fl4.flowi4_tos = tos;
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
 	fl4.flowi4_tun_key.tun_id = 0;
+	fl4.flowi4_flags = 0;
 
 	no_addr = idev->ifa_list == NULL;
 
@@ -366,7 +367,7 @@
 		if (nh->nh_dev == dev) {
 			dev_match = true;
 			break;
-		} else if (vrf_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
+		} else if (l3mdev_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
 			dev_match = true;
 			break;
 		}
@@ -803,7 +804,7 @@
 static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
 {
 	struct net *net = dev_net(ifa->ifa_dev->dev);
-	u32 tb_id = vrf_dev_table_rtnl(ifa->ifa_dev->dev);
+	u32 tb_id = l3mdev_fib_table(ifa->ifa_dev->dev);
 	struct fib_table *tb;
 	struct fib_config cfg = {
 		.fc_protocol = RTPROT_KERNEL,
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 064bd3c..af77298 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -57,8 +57,7 @@
 static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-
-static DEFINE_SPINLOCK(fib_multipath_lock);
+u32 fib_multipath_secret __read_mostly;
 
 #define for_nexthops(fi) {						\
 	int nhsel; const struct fib_nh *nh;				\
@@ -532,7 +531,67 @@
 	return ret;
 }
 
-#endif
+static void fib_rebalance(struct fib_info *fi)
+{
+	int total;
+	int w;
+	struct in_device *in_dev;
+
+	if (fi->fib_nhs < 2)
+		return;
+
+	total = 0;
+	for_nexthops(fi) {
+		if (nh->nh_flags & RTNH_F_DEAD)
+			continue;
+
+		in_dev = __in_dev_get_rcu(nh->nh_dev);
+
+		if (in_dev &&
+		    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+		    nh->nh_flags & RTNH_F_LINKDOWN)
+			continue;
+
+		total += nh->nh_weight;
+	} endfor_nexthops(fi);
+
+	w = 0;
+	change_nexthops(fi) {
+		int upper_bound;
+
+		in_dev = __in_dev_get_rcu(nexthop_nh->nh_dev);
+
+		if (nexthop_nh->nh_flags & RTNH_F_DEAD) {
+			upper_bound = -1;
+		} else if (in_dev &&
+			   IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+			   nexthop_nh->nh_flags & RTNH_F_LINKDOWN) {
+			upper_bound = -1;
+		} else {
+			w += nexthop_nh->nh_weight;
+			upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,
+							    total) - 1;
+		}
+
+		atomic_set(&nexthop_nh->nh_upper_bound, upper_bound);
+	} endfor_nexthops(fi);
+
+	net_get_random_once(&fib_multipath_secret,
+			    sizeof(fib_multipath_secret));
+}
+
+static inline void fib_add_weight(struct fib_info *fi,
+				  const struct fib_nh *nh)
+{
+	fi->fib_weight += nh->nh_weight;
+}
+
+#else /* CONFIG_IP_ROUTE_MULTIPATH */
+
+#define fib_rebalance(fi) do { } while (0)
+#define fib_add_weight(fi, nh) do { } while (0)
+
+#endif /* CONFIG_IP_ROUTE_MULTIPATH */
 
 static int fib_encap_match(struct net *net, u16 encap_type,
 			   struct nlattr *encap,
@@ -1094,8 +1153,11 @@
 
 	change_nexthops(fi) {
 		fib_info_update_nh_saddr(net, nexthop_nh);
+		fib_add_weight(fi, nexthop_nh);
 	} endfor_nexthops(fi)
 
+	fib_rebalance(fi);
+
 link_it:
 	ofi = fib_find_info(fi);
 	if (ofi) {
@@ -1317,12 +1379,6 @@
 					nexthop_nh->nh_flags |= RTNH_F_LINKDOWN;
 					break;
 				}
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-				spin_lock_bh(&fib_multipath_lock);
-				fi->fib_power -= nexthop_nh->nh_power;
-				nexthop_nh->nh_power = 0;
-				spin_unlock_bh(&fib_multipath_lock);
-#endif
 				dead++;
 			}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -1345,6 +1401,8 @@
 			}
 			ret++;
 		}
+
+		fib_rebalance(fi);
 	}
 
 	return ret;
@@ -1467,20 +1525,15 @@
 			    !__in_dev_get_rtnl(dev))
 				continue;
 			alive++;
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-			spin_lock_bh(&fib_multipath_lock);
-			nexthop_nh->nh_power = 0;
 			nexthop_nh->nh_flags &= ~nh_flags;
-			spin_unlock_bh(&fib_multipath_lock);
-#else
-			nexthop_nh->nh_flags &= ~nh_flags;
-#endif
 		} endfor_nexthops(fi)
 
 		if (alive > 0) {
 			fi->fib_flags &= ~nh_flags;
 			ret++;
 		}
+
+		fib_rebalance(fi);
 	}
 
 	return ret;
@@ -1488,62 +1541,40 @@
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 
-/*
- * The algorithm is suboptimal, but it provides really
- * fair weighted route distribution.
- */
-void fib_select_multipath(struct fib_result *res)
+void fib_select_multipath(struct fib_result *res, int hash)
 {
 	struct fib_info *fi = res->fi;
-	struct in_device *in_dev;
-	int w;
 
-	spin_lock_bh(&fib_multipath_lock);
-	if (fi->fib_power <= 0) {
-		int power = 0;
-		change_nexthops(fi) {
-			in_dev = __in_dev_get_rcu(nexthop_nh->nh_dev);
-			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
-				continue;
-			if (in_dev &&
-			    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
-			    nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
-				continue;
-			power += nexthop_nh->nh_weight;
-			nexthop_nh->nh_power = nexthop_nh->nh_weight;
-		} endfor_nexthops(fi);
-		fi->fib_power = power;
-		if (power <= 0) {
-			spin_unlock_bh(&fib_multipath_lock);
-			/* Race condition: route has just become dead. */
-			res->nh_sel = 0;
-			return;
-		}
-	}
+	for_nexthops(fi) {
+		if (hash > atomic_read(&nh->nh_upper_bound))
+			continue;
 
-
-	/* w should be random number [0..fi->fib_power-1],
-	 * it is pretty bad approximation.
-	 */
-
-	w = jiffies % fi->fib_power;
-
-	change_nexthops(fi) {
-		if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
-		    nexthop_nh->nh_power) {
-			w -= nexthop_nh->nh_power;
-			if (w <= 0) {
-				nexthop_nh->nh_power--;
-				fi->fib_power--;
-				res->nh_sel = nhsel;
-				spin_unlock_bh(&fib_multipath_lock);
-				return;
-			}
-		}
+		res->nh_sel = nhsel;
+		return;
 	} endfor_nexthops(fi);
 
 	/* Race condition: route has just become dead. */
 	res->nh_sel = 0;
-	spin_unlock_bh(&fib_multipath_lock);
 }
 #endif
+
+void fib_select_path(struct net *net, struct fib_result *res,
+		     struct flowi4 *fl4, int mp_hash)
+{
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+	if (res->fi->fib_nhs > 1 && fl4->flowi4_oif == 0) {
+		if (mp_hash < 0)
+			mp_hash = fib_multipath_hash(fl4->saddr, fl4->daddr);
+		fib_select_multipath(res, mp_hash);
+	}
+	else
+#endif
+	if (!res->prefixlen &&
+	    res->table->tb_num_default > 1 &&
+	    res->type == RTN_UNICAST && !fl4->flowi4_oif)
+		fib_select_default(fl4, res);
+
+	if (!fl4->saddr)
+		fl4->saddr = FIB_RES_PREFSRC(net, *res);
+}
+EXPORT_SYMBOL_GPL(fib_select_path);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 26d6ffb..6c2af79 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1426,7 +1426,7 @@
 			    nh->nh_flags & RTNH_F_LINKDOWN &&
 			    !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
 				continue;
-			if (!(flp->flowi4_flags & FLOWI_FLAG_VRFSRC)) {
+			if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
 				if (flp->flowi4_oif &&
 				    flp->flowi4_oif != nh->nh_oif)
 					continue;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 79fe05b..f3c356b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -96,7 +96,7 @@
 #include <net/xfrm.h>
 #include <net/inet_common.h>
 #include <net/ip_fib.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 
 /*
  *	Build xmit assembly blocks
@@ -309,7 +309,7 @@
 
 	rc = false;
 	if (icmp_global_allow()) {
-		int vif = vrf_master_ifindex(dst->dev);
+		int vif = l3mdev_master_ifindex(dst->dev);
 		struct inet_peer *peer;
 
 		peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
@@ -427,7 +427,7 @@
 	fl4.flowi4_mark = mark;
 	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
 	fl4.flowi4_proto = IPPROTO_ICMP;
-	fl4.flowi4_oif = vrf_master_ifindex(skb->dev) ? : skb->dev->ifindex;
+	fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_key(net, &fl4);
 	if (IS_ERR(rt))
@@ -440,6 +440,22 @@
 	icmp_xmit_unlock(sk);
 }
 
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+
+/* Source and destination are swapped. See ip_multipath_icmp_hash */
+static int icmp_multipath_hash_skb(const struct sk_buff *skb)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+
+	return fib_multipath_hash(iph->daddr, iph->saddr);
+}
+
+#else
+
+#define icmp_multipath_hash_skb(skb) (-1)
+
+#endif
+
 static struct rtable *icmp_route_lookup(struct net *net,
 					struct flowi4 *fl4,
 					struct sk_buff *skb_in,
@@ -461,10 +477,11 @@
 	fl4->flowi4_proto = IPPROTO_ICMP;
 	fl4->fl4_icmp_type = type;
 	fl4->fl4_icmp_code = code;
-	fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev) ? : skb_in->dev->ifindex;
+	fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev);
 
 	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
-	rt = __ip_route_output_key(net, fl4);
+	rt = __ip_route_output_key_hash(net, fl4,
+					icmp_multipath_hash_skb(skb_in));
 	if (IS_ERR(rt))
 		return rt;
 
@@ -642,7 +659,9 @@
 	 */
 
 	saddr = iph->daddr;
-	if (!(rt->rt_flags & RTCF_LOCAL)) {
+	if (!((type == ICMP_REDIRECT) &&
+	      net->ipv4.sysctl_icmp_redirects_use_orig_daddr) &&
+	    !(rt->rt_flags & RTCF_LOCAL)) {
 		struct net_device *dev = NULL;
 
 		rcu_read_lock();
@@ -1205,6 +1224,11 @@
 	net->ipv4.sysctl_icmp_ratemask = 0x1818;
 	net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;
 
+	/* Control parameter - use the daddr of originating packets as saddr
+	 * in redirect messages?
+	 */
+	net->ipv4.sysctl_icmp_redirects_use_orig_daddr = 0;
+
 	return 0;
 
 fail:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d38b8b6..64aaf35 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -397,7 +397,7 @@
 
 	pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);
 
-	return ip_local_out(skb);
+	return ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
 }
 
 static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
@@ -739,7 +739,7 @@
 	ih->group = group;
 	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
 
-	return ip_local_out(skb);
+	return ip_local_out(net, skb->sk, skb);
 }
 
 static void igmp_gq_timer_expire(unsigned long data)
@@ -2569,7 +2569,7 @@
 }
 
 /* called with rcu_read_lock() */
-int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto)
+int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u8 proto)
 {
 	struct ip_mc_list *im;
 	struct ip_mc_list __rcu **mc_hash;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 1349571..514b9e9 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -330,14 +330,12 @@
 		if (error)
 			goto out_err;
 	}
-	req = reqsk_queue_remove(queue);
+	req = reqsk_queue_remove(queue, sk);
 	newsk = req->sk;
 
-	sk_acceptq_removed(sk);
 	if (sk->sk_protocol == IPPROTO_TCP &&
-	    tcp_rsk(req)->tfo_listener &&
-	    queue->fastopenq) {
-		spin_lock_bh(&queue->fastopenq->lock);
+	    tcp_rsk(req)->tfo_listener) {
+		spin_lock_bh(&queue->fastopenq.lock);
 		if (tcp_rsk(req)->tfo_listener) {
 			/* We are still waiting for the final ACK from 3WHS
 			 * so can't free req now. Instead, we set req->sk to
@@ -348,7 +346,7 @@
 			req->sk = NULL;
 			req = NULL;
 		}
-		spin_unlock_bh(&queue->fastopenq->lock);
+		spin_unlock_bh(&queue->fastopenq.lock);
 	}
 out:
 	release_sock(sk);
@@ -408,7 +406,7 @@
 }
 EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
 
-struct dst_entry *inet_csk_route_req(struct sock *sk,
+struct dst_entry *inet_csk_route_req(const struct sock *sk,
 				     struct flowi4 *fl4,
 				     const struct request_sock *req)
 {
@@ -439,7 +437,7 @@
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_req);
 
-struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
+struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 					    struct sock *newsk,
 					    const struct request_sock *req)
 {
@@ -478,65 +476,12 @@
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
 
-static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
-				 const u32 rnd, const u32 synq_hsize)
-{
-	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
-}
-
 #if IS_ENABLED(CONFIG_IPV6)
 #define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
 #define AF_INET_FAMILY(fam) true
 #endif
 
-/* Note: this is temporary :
- * req sock will no longer be in listener hash table
-*/
-struct request_sock *inet_csk_search_req(struct sock *sk,
-					 const __be16 rport,
-					 const __be32 raddr,
-					 const __be32 laddr)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	struct request_sock *req;
-	u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
-				  lopt->nr_table_entries);
-
-	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
-	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
-		const struct inet_request_sock *ireq = inet_rsk(req);
-
-		if (ireq->ir_rmt_port == rport &&
-		    ireq->ir_rmt_addr == raddr &&
-		    ireq->ir_loc_addr == laddr &&
-		    AF_INET_FAMILY(req->rsk_ops->family)) {
-			atomic_inc(&req->rsk_refcnt);
-			WARN_ON(req->sk);
-			break;
-		}
-	}
-	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
-
-	return req;
-}
-EXPORT_SYMBOL_GPL(inet_csk_search_req);
-
-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
-				   unsigned long timeout)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
-				     inet_rsk(req)->ir_rmt_port,
-				     lopt->hash_rnd, lopt->nr_table_entries);
-
-	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
-	inet_csk_reqsk_queue_added(sk, timeout);
-}
-EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
-
 /* Only thing we need from tcp.h */
 extern int sysctl_tcp_synack_retries;
 
@@ -563,7 +508,7 @@
 		  req->num_timeout >= rskq_defer_accept - 1;
 }
 
-int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
+int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
 {
 	int err = req->rsk_ops->rtx_syn_ack(parent, req);
 
@@ -573,26 +518,20 @@
 }
 EXPORT_SYMBOL(inet_rtx_syn_ack);
 
-/* return true if req was found in the syn_table[] */
+/* return true if req was found in the ehash table */
 static bool reqsk_queue_unlink(struct request_sock_queue *queue,
 			       struct request_sock *req)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-	struct request_sock **prev;
-	bool found = false;
+	struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
+	spinlock_t *lock;
+	bool found;
 
-	spin_lock(&queue->syn_wait_lock);
+	lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
 
-	for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
-	     prev = &(*prev)->dl_next) {
-		if (*prev == req) {
-			*prev = req->dl_next;
-			found = true;
-			break;
-		}
-	}
+	spin_lock(lock);
+	found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
+	spin_unlock(lock);
 
-	spin_unlock(&queue->syn_wait_lock);
 	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
 		reqsk_put(req);
 	return found;
@@ -613,15 +552,12 @@
 	struct sock *sk_listener = req->rsk_listener;
 	struct inet_connection_sock *icsk = inet_csk(sk_listener);
 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
-	struct listen_sock *lopt = queue->listen_opt;
 	int qlen, expire = 0, resend = 0;
 	int max_retries, thresh;
 	u8 defer_accept;
 
-	if (sk_listener->sk_state != TCP_LISTEN || !lopt) {
-		reqsk_put(req);
-		return;
-	}
+	if (sk_listener->sk_state != TCP_LISTEN)
+		goto drop;
 
 	max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
 	thresh = max_retries;
@@ -642,9 +578,9 @@
 	 * embrions; and abort old ones without pity, if old
 	 * ones are about to clog our table.
 	 */
-	qlen = listen_sock_qlen(lopt);
-	if (qlen >> (lopt->max_qlen_log - 1)) {
-		int young = listen_sock_young(lopt) << 1;
+	qlen = reqsk_queue_len(queue);
+	if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
+		int young = reqsk_queue_len_young(queue) << 1;
 
 		while (thresh > 2) {
 			if (qlen < young)
@@ -666,41 +602,41 @@
 		unsigned long timeo;
 
 		if (req->num_timeout++ == 0)
-			atomic_inc(&lopt->young_dec);
+			atomic_dec(&queue->young);
 		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
 		mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
 		return;
 	}
+drop:
 	inet_csk_reqsk_queue_drop(sk_listener, req);
 	reqsk_put(req);
 }
 
-void reqsk_queue_hash_req(struct request_sock_queue *queue,
-			  u32 hash, struct request_sock *req,
-			  unsigned long timeout)
+static void reqsk_queue_hash_req(struct request_sock *req,
+				 unsigned long timeout)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-
 	req->num_retrans = 0;
 	req->num_timeout = 0;
 	req->sk = NULL;
 
+	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
+	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
+
+	inet_ehash_insert(req_to_sk(req), NULL);
 	/* before letting lookups find us, make sure all req fields
 	 * are committed to memory and refcnt initialized.
 	 */
 	smp_wmb();
-	atomic_set(&req->rsk_refcnt, 2);
-	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
-	req->rsk_hash = hash;
-
-	spin_lock(&queue->syn_wait_lock);
-	req->dl_next = lopt->syn_table[hash];
-	lopt->syn_table[hash] = req;
-	spin_unlock(&queue->syn_wait_lock);
-
-	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
+	atomic_set(&req->rsk_refcnt, 2 + 1);
 }
-EXPORT_SYMBOL(reqsk_queue_hash_req);
+
+void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+				   unsigned long timeout)
+{
+	reqsk_queue_hash_req(req, timeout);
+	inet_csk_reqsk_queue_added(sk);
+}
+EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
 
 /**
  *	inet_csk_clone_lock - clone an inet socket, and lock its clone
@@ -793,12 +729,10 @@
 
 int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
 {
-	struct inet_sock *inet = inet_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);
+	struct inet_sock *inet = inet_sk(sk);
 
-	if (rc != 0)
-		return rc;
+	reqsk_queue_alloc(&icsk->icsk_accept_queue);
 
 	sk->sk_max_ack_backlog = 0;
 	sk->sk_ack_backlog = 0;
@@ -820,7 +754,6 @@
 	}
 
 	sk->sk_state = TCP_CLOSE;
-	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
 	return -EADDRINUSE;
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_start);
@@ -833,11 +766,7 @@
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
-	struct request_sock *acc_req;
-	struct request_sock *req;
-
-	/* make all the listen_opt local to us */
-	acc_req = reqsk_queue_yank_acceptq(queue);
+	struct request_sock *next, *req;
 
 	/* Following specs, it would be better either to send FIN
 	 * (and enter FIN-WAIT-1, it is normal close)
@@ -847,13 +776,9 @@
 	 * To be honest, we are not able to make either
 	 * of the variants now.			--ANK
 	 */
-	reqsk_queue_destroy(queue);
-
-	while ((req = acc_req) != NULL) {
+	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
 		struct sock *child = req->sk;
 
-		acc_req = req->dl_next;
-
 		local_bh_disable();
 		bh_lock_sock(child);
 		WARN_ON(sock_owned_by_user(child));
@@ -883,18 +808,19 @@
 		local_bh_enable();
 		sock_put(child);
 
-		sk_acceptq_removed(sk);
 		reqsk_put(req);
+		cond_resched();
 	}
-	if (queue->fastopenq) {
+	if (queue->fastopenq.rskq_rst_head) {
 		/* Free all the reqs queued in rskq_rst_head. */
-		spin_lock_bh(&queue->fastopenq->lock);
-		acc_req = queue->fastopenq->rskq_rst_head;
-		queue->fastopenq->rskq_rst_head = NULL;
-		spin_unlock_bh(&queue->fastopenq->lock);
-		while ((req = acc_req) != NULL) {
-			acc_req = req->dl_next;
+		spin_lock_bh(&queue->fastopenq.lock);
+		req = queue->fastopenq.rskq_rst_head;
+		queue->fastopenq.rskq_rst_head = NULL;
+		spin_unlock_bh(&queue->fastopenq.lock);
+		while (req != NULL) {
+			next = req->dl_next;
 			reqsk_put(req);
+			req = next;
 		}
 	}
 	WARN_ON(sk->sk_ack_backlog);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c3b1f3a..ab9f8a6 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -730,91 +730,21 @@
 #endif
 }
 
-static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
-			       struct netlink_callback *cb,
-			       const struct inet_diag_req_v2 *r,
-			       const struct nlattr *bc)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct inet_sock *inet = inet_sk(sk);
-	struct inet_diag_entry entry;
-	int j, s_j, reqnum, s_reqnum;
-	struct listen_sock *lopt;
-	int err = 0;
-
-	s_j = cb->args[3];
-	s_reqnum = cb->args[4];
-
-	if (s_j > 0)
-		s_j--;
-
-	entry.family = sk->sk_family;
-
-	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
-
-	lopt = icsk->icsk_accept_queue.listen_opt;
-	if (!lopt || !listen_sock_qlen(lopt))
-		goto out;
-
-	if (bc) {
-		entry.sport = inet->inet_num;
-		entry.userlocks = sk->sk_userlocks;
-	}
-
-	for (j = s_j; j < lopt->nr_table_entries; j++) {
-		struct request_sock *req, *head = lopt->syn_table[j];
-
-		reqnum = 0;
-		for (req = head; req; reqnum++, req = req->dl_next) {
-			struct inet_request_sock *ireq = inet_rsk(req);
-
-			if (reqnum < s_reqnum)
-				continue;
-			if (r->id.idiag_dport != ireq->ir_rmt_port &&
-			    r->id.idiag_dport)
-				continue;
-
-			if (bc) {
-				/* Note: entry.sport and entry.userlocks are already set */
-				entry_fill_addrs(&entry, req_to_sk(req));
-				entry.dport = ntohs(ireq->ir_rmt_port);
-
-				if (!inet_diag_bc_run(bc, &entry))
-					continue;
-			}
-
-			err = inet_req_diag_fill(req_to_sk(req), skb,
-						 NETLINK_CB(cb->skb).portid,
-						 cb->nlh->nlmsg_seq,
-						 NLM_F_MULTI, cb->nlh);
-			if (err < 0) {
-				cb->args[3] = j + 1;
-				cb->args[4] = reqnum;
-				goto out;
-			}
-		}
-
-		s_reqnum = 0;
-	}
-
-out:
-	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
-
-	return err;
-}
-
 void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
 			 struct netlink_callback *cb,
 			 const struct inet_diag_req_v2 *r, struct nlattr *bc)
 {
 	struct net *net = sock_net(skb->sk);
 	int i, num, s_i, s_num;
+	u32 idiag_states = r->idiag_states;
 
+	if (idiag_states & TCPF_SYN_RECV)
+		idiag_states |= TCPF_NEW_SYN_RECV;
 	s_i = cb->args[1];
 	s_num = num = cb->args[2];
 
 	if (cb->args[0] == 0) {
-		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
+		if (!(idiag_states & TCPF_LISTEN))
 			goto skip_listen_ht;
 
 		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
@@ -844,21 +774,11 @@
 				    r->id.idiag_sport)
 					goto next_listen;
 
-				if (!(r->idiag_states & TCPF_LISTEN) ||
-				    r->id.idiag_dport ||
+				if (r->id.idiag_dport ||
 				    cb->args[3] > 0)
-					goto syn_recv;
-
-				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
-					spin_unlock_bh(&ilb->lock);
-					goto done;
-				}
-
-syn_recv:
-				if (!(r->idiag_states & TCPF_SYN_RECV))
 					goto next_listen;
 
-				if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
+				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
 					spin_unlock_bh(&ilb->lock);
 					goto done;
 				}
@@ -879,7 +799,7 @@
 		s_i = num = s_num = 0;
 	}
 
-	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
+	if (!(idiag_states & ~TCPF_LISTEN))
 		goto out;
 
 	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
@@ -906,7 +826,7 @@
 				goto next_normal;
 			state = (sk->sk_state == TCP_TIME_WAIT) ?
 				inet_twsk(sk)->tw_substate : sk->sk_state;
-			if (!(r->idiag_states & (1 << state)))
+			if (!(idiag_states & (1 << state)))
 				goto next_normal;
 			if (r->sdiag_family != AF_UNSPEC &&
 			    sk->sk_family != r->sdiag_family)
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 8912019..08643a3 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -126,7 +126,7 @@
 }
 EXPORT_SYMBOL(inet_put_port);
 
-int __inet_inherit_port(struct sock *sk, struct sock *child)
+int __inet_inherit_port(const struct sock *sk, struct sock *child)
 {
 	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
 	unsigned short port = inet_sk(child)->inet_num;
@@ -185,6 +185,8 @@
 				return -1;
 			score += 4;
 		}
+		if (sk->sk_incoming_cpu == raw_smp_processor_id())
+			score++;
 	}
 	return score;
 }
@@ -398,14 +400,18 @@
 					  inet->inet_dport);
 }
 
-void __inet_hash_nolisten(struct sock *sk, struct sock *osk)
+/* insert a socket into ehash, and eventually remove another one
+ * (the other one can be a SYN_RECV or TIMEWAIT)
+ */
+int inet_ehash_insert(struct sock *sk, struct sock *osk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct hlist_nulls_head *list;
 	struct inet_ehash_bucket *head;
 	spinlock_t *lock;
+	int ret = 0;
 
-	WARN_ON(!sk_unhashed(sk));
+	WARN_ON_ONCE(!sk_unhashed(sk));
 
 	sk->sk_hash = sk_ehashfn(sk);
 	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
@@ -419,6 +425,12 @@
 		sk_nulls_del_node_init_rcu(osk);
 	}
 	spin_unlock(lock);
+	return ret;
+}
+
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk)
+{
+	inet_ehash_insert(sk, osk);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 }
 EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index ae22cc2..c67f9bd 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -123,13 +123,15 @@
 	/*
 	 * Step 2: Hash TW into tcp ehash chain.
 	 * Notes :
-	 * - tw_refcnt is set to 3 because :
+	 * - tw_refcnt is set to 4 because :
 	 * - We have one reference from bhash chain.
 	 * - We have one reference from ehash chain.
+	 * - We have one reference from timer.
+	 * - One reference for ourself (our caller will release it).
 	 * We can use atomic_set() because prior spin_lock()/spin_unlock()
 	 * committed into memory all tw fields.
 	 */
-	atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
+	atomic_set(&tw->tw_refcnt, 4);
 	inet_twsk_add_node_rcu(tw, &ehead->chain);
 
 	/* Step 3: Remove SK from hash chain */
@@ -217,7 +219,7 @@
 }
 EXPORT_SYMBOL(inet_twsk_deschedule_put);
 
-void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
+void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
 {
 	/* timeout := RTO * 3.5
 	 *
@@ -245,12 +247,14 @@
 	 */
 
 	tw->tw_kill = timeo <= 4*HZ;
-	if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) {
-		atomic_inc(&tw->tw_refcnt);
+	if (!rearm) {
+		BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
 		atomic_inc(&tw->tw_dr->tw_count);
+	} else {
+		mod_timer_pending(&tw->tw_timer, jiffies + timeo);
 	}
 }
-EXPORT_SYMBOL_GPL(inet_twsk_schedule);
+EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
 		     struct inet_timewait_death_row *twdr, int family)
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index d66cfb3..da0d7ce 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -72,7 +72,7 @@
 		ip_forward_options(skb);
 
 	skb_sender_cpu_clear(skb);
-	return dst_output(sk, skb);
+	return dst_output(net, sk, skb);
 }
 
 int ip_forward(struct sk_buff *skb)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index fa7f153..5482745 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -48,7 +48,7 @@
 #include <linux/inet.h>
 #include <linux/netfilter_ipv4.h>
 #include <net/inet_ecn.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 
 /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
  * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
@@ -78,7 +78,7 @@
 	u8		ecn; /* RFC3168 support */
 	u16		max_df_size; /* largest frag with DF set seen */
 	int             iif;
-	int             vif;   /* VRF device index */
+	int             vif;   /* L3 master device index */
 	unsigned int    rid;
 	struct inet_peer *peer;
 };
@@ -654,11 +654,10 @@
 }
 
 /* Process an incoming IP datagram fragment. */
-int ip_defrag(struct sk_buff *skb, u32 user)
+int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
 {
 	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
-	int vif = vrf_master_ifindex_rcu(dev);
-	struct net *net = dev_net(dev);
+	int vif = l3mdev_master_ifindex_rcu(dev);
 	struct ipq *qp;
 
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
@@ -683,7 +682,7 @@
 }
 EXPORT_SYMBOL(ip_defrag);
 
-struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
 {
 	struct iphdr iph;
 	int netoff;
@@ -712,7 +711,7 @@
 			if (pskb_trim_rcsum(skb, netoff + len))
 				return skb;
 			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
-			if (ip_defrag(skb, user))
+			if (ip_defrag(net, skb, user))
 				return NULL;
 			skb_clear_hash(skb);
 		}
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 7cc9f7b..b1209b6 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -157,6 +157,7 @@
 	u8 protocol = ip_hdr(skb)->protocol;
 	struct sock *last = NULL;
 	struct net_device *dev = skb->dev;
+	struct net *net = dev_net(dev);
 
 	for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) {
 		struct sock *sk = ra->sk;
@@ -167,9 +168,9 @@
 		if (sk && inet_sk(sk)->inet_num == protocol &&
 		    (!sk->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == dev->ifindex) &&
-		    net_eq(sock_net(sk), dev_net(dev))) {
+		    net_eq(sock_net(sk), net)) {
 			if (ip_is_fragment(ip_hdr(skb))) {
-				if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
+				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
 					return true;
 			}
 			if (last) {
@@ -246,14 +247,15 @@
 	/*
 	 *	Reassemble IP fragments.
 	 */
+	struct net *net = dev_net(skb->dev);
 
 	if (ip_is_fragment(ip_hdr(skb))) {
-		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
+		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
 			return 0;
 	}
 
 	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
-		       dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+		       net, NULL, skb, skb->dev, NULL,
 		       ip_local_deliver_finish);
 }
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 09a6b7b..67404e1 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -83,9 +83,10 @@
 int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
 EXPORT_SYMBOL(sysctl_ip_default_ttl);
 
-static int ip_fragment(struct sock *sk, struct sk_buff *skb,
-		       unsigned int mtu,
-		       int (*output)(struct sock *, struct sk_buff *));
+static int
+ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+	    unsigned int mtu,
+	    int (*output)(struct net *, struct sock *, struct sk_buff *));
 
 /* Generate a checksum for an outgoing IP datagram. */
 void ip_send_check(struct iphdr *iph)
@@ -95,34 +96,28 @@
 }
 EXPORT_SYMBOL(ip_send_check);
 
-static int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
+int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
 	struct iphdr *iph = ip_hdr(skb);
 
 	iph->tot_len = htons(skb->len);
 	ip_send_check(iph);
 	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
 		       net, sk, skb, NULL, skb_dst(skb)->dev,
-		       dst_output_okfn);
+		       dst_output);
 }
 
-int __ip_local_out(struct sk_buff *skb)
-{
-	return __ip_local_out_sk(skb->sk, skb);
-}
-
-int ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
+int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	int err;
 
-	err = __ip_local_out(skb);
+	err = __ip_local_out(net, sk, skb);
 	if (likely(err == 1))
-		err = dst_output(sk, skb);
+		err = dst_output(net, sk, skb);
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(ip_local_out_sk);
+EXPORT_SYMBOL_GPL(ip_local_out);
 
 static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
 {
@@ -137,11 +132,12 @@
  *		Add an ip header to a skbuff and send it out.
  *
  */
-int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
 			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct rtable *rt = skb_rtable(skb);
+	struct net *net = sock_net(sk);
 	struct iphdr *iph;
 
 	/* Build the IP header. */
@@ -151,15 +147,17 @@
 	iph->version  = 4;
 	iph->ihl      = 5;
 	iph->tos      = inet->tos;
-	if (ip_dont_fragment(sk, &rt->dst))
-		iph->frag_off = htons(IP_DF);
-	else
-		iph->frag_off = 0;
 	iph->ttl      = ip_select_ttl(inet, &rt->dst);
 	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
 	iph->saddr    = saddr;
 	iph->protocol = sk->sk_protocol;
-	ip_select_ident(sock_net(sk), skb, sk);
+	if (ip_dont_fragment(sk, &rt->dst)) {
+		iph->frag_off = htons(IP_DF);
+		iph->id = 0;
+	} else {
+		iph->frag_off = 0;
+		__ip_select_ident(net, iph, 1);
+	}
 
 	if (opt && opt->opt.optlen) {
 		iph->ihl += opt->opt.optlen>>2;
@@ -170,16 +168,15 @@
 	skb->mark = sk->sk_mark;
 
 	/* Send it out. */
-	return ip_local_out(skb);
+	return ip_local_out(net, skb->sk, skb);
 }
 EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
 
-static int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
+static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct rtable *rt = (struct rtable *)dst;
 	struct net_device *dev = dst->dev;
-	struct net *net = dev_net(dev);
 	unsigned int hh_len = LL_RESERVED_SPACE(dev);
 	struct neighbour *neigh;
 	u32 nexthop;
@@ -223,8 +220,8 @@
 	return -EINVAL;
 }
 
-static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb,
-				unsigned int mtu)
+static int ip_finish_output_gso(struct net *net, struct sock *sk,
+				struct sk_buff *skb, unsigned int mtu)
 {
 	netdev_features_t features;
 	struct sk_buff *segs;
@@ -233,7 +230,7 @@
 	/* common case: locally created skb or seglen is <= mtu */
 	if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
 	      skb_gso_network_seglen(skb) <= mtu)
-		return ip_finish_output2(sk, skb);
+		return ip_finish_output2(net, sk, skb);
 
 	/* Slowpath -  GSO segment length is exceeding the dst MTU.
 	 *
@@ -256,7 +253,7 @@
 		int err;
 
 		segs->next = NULL;
-		err = ip_fragment(sk, segs, mtu, ip_finish_output2);
+		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
 
 		if (err && ret == 0)
 			ret = err;
@@ -274,24 +271,23 @@
 	/* Policy lookup after SNAT yielded a new policy */
 	if (skb_dst(skb)->xfrm) {
 		IPCB(skb)->flags |= IPSKB_REROUTED;
-		return dst_output(sk, skb);
+		return dst_output(net, sk, skb);
 	}
 #endif
 	mtu = ip_skb_dst_mtu(skb);
 	if (skb_is_gso(skb))
-		return ip_finish_output_gso(sk, skb, mtu);
+		return ip_finish_output_gso(net, sk, skb, mtu);
 
 	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
-		return ip_fragment(sk, skb, mtu, ip_finish_output2);
+		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
 
-	return ip_finish_output2(sk, skb);
+	return ip_finish_output2(net, sk, skb);
 }
 
-int ip_mc_output(struct sock *sk, struct sk_buff *skb)
+int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct rtable *rt = skb_rtable(skb);
 	struct net_device *dev = rt->dst.dev;
-	struct net *net = dev_net(dev);
 
 	/*
 	 *	If the indicated interface is up and running, send the packet.
@@ -350,10 +346,9 @@
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
-int ip_output(struct sock *sk, struct sk_buff *skb)
+int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct net_device *dev = skb_dst(skb)->dev;
-	struct net *net = dev_net(dev);
 
 	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
 
@@ -384,6 +379,7 @@
 int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
 {
 	struct inet_sock *inet = inet_sk(sk);
+	struct net *net = sock_net(sk);
 	struct ip_options_rcu *inet_opt;
 	struct flowi4 *fl4;
 	struct rtable *rt;
@@ -414,7 +410,7 @@
 		 * keep trying until route appears or the connection times
 		 * itself out.
 		 */
-		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
+		rt = ip_route_output_ports(net, fl4, sk,
 					   daddr, inet->inet_saddr,
 					   inet->inet_dport,
 					   inet->inet_sport,
@@ -451,20 +447,20 @@
 		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
 	}
 
-	ip_select_ident_segs(sock_net(sk), skb, sk,
+	ip_select_ident_segs(net, skb, sk,
 			     skb_shinfo(skb)->gso_segs ?: 1);
 
 	/* TODO : should we use skb->sk here instead of sk ? */
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
 
-	res = ip_local_out(skb);
+	res = ip_local_out(net, sk, skb);
 	rcu_read_unlock();
 	return res;
 
 no_route:
 	rcu_read_unlock();
-	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 	kfree_skb(skb);
 	return -EHOSTUNREACH;
 }
@@ -493,20 +489,18 @@
 	skb_copy_secmark(to, from);
 }
 
-static int ip_fragment(struct sock *sk, struct sk_buff *skb,
+static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 		       unsigned int mtu,
-		       int (*output)(struct sock *, struct sk_buff *))
+		       int (*output)(struct net *, struct sock *, struct sk_buff *))
 {
 	struct iphdr *iph = ip_hdr(skb);
 
 	if ((iph->frag_off & htons(IP_DF)) == 0)
-		return ip_do_fragment(sk, skb, output);
+		return ip_do_fragment(net, sk, skb, output);
 
 	if (unlikely(!skb->ignore_df ||
 		     (IPCB(skb)->frag_max_size &&
 		      IPCB(skb)->frag_max_size > mtu))) {
-		struct net *net = dev_net(skb_rtable(skb)->dst.dev);
-
 		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 			  htonl(mtu));
@@ -514,7 +508,7 @@
 		return -EMSGSIZE;
 	}
 
-	return ip_do_fragment(sk, skb, output);
+	return ip_do_fragment(net, sk, skb, output);
 }
 
 /*
@@ -524,8 +518,8 @@
  *	single device frame, and queue such a frame for sending.
  */
 
-int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
-		   int (*output)(struct sock *, struct sk_buff *))
+int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+		   int (*output)(struct net *, struct sock *, struct sk_buff *))
 {
 	struct iphdr *iph;
 	int ptr;
@@ -535,11 +529,9 @@
 	int offset;
 	__be16 not_last_frag;
 	struct rtable *rt = skb_rtable(skb);
-	struct net *net;
 	int err = 0;
 
 	dev = rt->dst.dev;
-	net = dev_net(dev);
 
 	/*
 	 *	Point into the IP datagram header.
@@ -629,7 +621,7 @@
 				ip_send_check(iph);
 			}
 
-			err = output(sk, skb);
+			err = output(net, sk, skb);
 
 			if (!err)
 				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
@@ -769,7 +761,7 @@
 
 		ip_send_check(iph);
 
-		err = output(sk, skb2);
+		err = output(net, sk, skb2);
 		if (err)
 			goto fail;
 
@@ -1442,7 +1434,7 @@
 {
 	int err;
 
-	err = ip_local_out(skb);
+	err = ip_local_out(net, skb->sk, skb);
 	if (err) {
 		if (err > 0)
 			err = net_xmit_errno(err);
@@ -1569,7 +1561,7 @@
 	}
 
 	oif = arg->bound_dev_if;
-	if (!oif && netif_index_is_vrf(net, skb->skb_iif))
+	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
 		oif = skb->skb_iif;
 
 	flowi4_init_output(&fl4, oif,
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 29ed6c5..6cb9009 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -46,12 +46,14 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
+#include <net/dst_metadata.h>
 
 int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 		  __be32 src, __be32 dst, __u8 proto,
 		  __u8 tos, __u8 ttl, __be16 df, bool xnet)
 {
-	int pkt_len = skb->len;
+	int pkt_len = skb->len - skb_inner_network_offset(skb);
+	struct net *net = dev_net(rt->dst.dev);
 	struct iphdr *iph;
 	int err;
 
@@ -75,10 +77,9 @@
 	iph->daddr	=	dst;
 	iph->saddr	=	src;
 	iph->ttl	=	ttl;
-	__ip_select_ident(dev_net(rt->dst.dev), iph,
-			  skb_shinfo(skb)->gso_segs ?: 1);
+	__ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);
 
-	err = ip_local_out_sk(sk, skb);
+	err = ip_local_out(net, sk, skb);
 	if (unlikely(net_xmit_eval(err)))
 		pkt_len = 0;
 	return pkt_len;
@@ -119,6 +120,33 @@
 }
 EXPORT_SYMBOL_GPL(iptunnel_pull_header);
 
+struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
+					     gfp_t flags)
+{
+	struct metadata_dst *res;
+	struct ip_tunnel_info *dst, *src;
+
+	if (!md || md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
+		return NULL;
+
+	res = metadata_dst_alloc(0, flags);
+	if (!res)
+		return NULL;
+
+	dst = &res->u.tun_info;
+	src = &md->u.tun_info;
+	dst->key.tun_id = src->key.tun_id;
+	if (src->mode & IP_TUNNEL_INFO_IPV6)
+		memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src,
+		       sizeof(struct in6_addr));
+	else
+		dst->key.u.ipv4.dst = src->key.u.ipv4.src;
+	dst->mode = src->mode | IP_TUNNEL_INFO_TX;
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
+
 struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
 					 bool csum_help,
 					 int gso_type_mask)
@@ -198,8 +226,6 @@
 	[LWTUNNEL_IP_SRC]	= { .type = NLA_U32 },
 	[LWTUNNEL_IP_TTL]	= { .type = NLA_U8 },
 	[LWTUNNEL_IP_TOS]	= { .type = NLA_U8 },
-	[LWTUNNEL_IP_SPORT]	= { .type = NLA_U16 },
-	[LWTUNNEL_IP_DPORT]	= { .type = NLA_U16 },
 	[LWTUNNEL_IP_FLAGS]	= { .type = NLA_U16 },
 };
 
@@ -239,12 +265,6 @@
 	if (tb[LWTUNNEL_IP_TOS])
 		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
 
-	if (tb[LWTUNNEL_IP_SPORT])
-		tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]);
-
-	if (tb[LWTUNNEL_IP_DPORT])
-		tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]);
-
 	if (tb[LWTUNNEL_IP_FLAGS])
 		tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]);
 
@@ -266,8 +286,6 @@
 	    nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
 	    nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
 	    nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
-	    nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) ||
-	    nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) ||
 	    nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
 		return -ENOMEM;
 
@@ -281,8 +299,6 @@
 		+ nla_total_size(4)	/* LWTUNNEL_IP_SRC */
 		+ nla_total_size(1)	/* LWTUNNEL_IP_TOS */
 		+ nla_total_size(1)	/* LWTUNNEL_IP_TTL */
-		+ nla_total_size(2)	/* LWTUNNEL_IP_SPORT */
-		+ nla_total_size(2)	/* LWTUNNEL_IP_DPORT */
 		+ nla_total_size(2);	/* LWTUNNEL_IP_FLAGS */
 }
 
@@ -305,8 +321,6 @@
 	[LWTUNNEL_IP6_SRC]		= { .len = sizeof(struct in6_addr) },
 	[LWTUNNEL_IP6_HOPLIMIT]		= { .type = NLA_U8 },
 	[LWTUNNEL_IP6_TC]		= { .type = NLA_U8 },
-	[LWTUNNEL_IP6_SPORT]		= { .type = NLA_U16 },
-	[LWTUNNEL_IP6_DPORT]		= { .type = NLA_U16 },
 	[LWTUNNEL_IP6_FLAGS]		= { .type = NLA_U16 },
 };
 
@@ -346,12 +360,6 @@
 	if (tb[LWTUNNEL_IP6_TC])
 		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
 
-	if (tb[LWTUNNEL_IP6_SPORT])
-		tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP6_SPORT]);
-
-	if (tb[LWTUNNEL_IP6_DPORT])
-		tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP6_DPORT]);
-
 	if (tb[LWTUNNEL_IP6_FLAGS])
 		tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]);
 
@@ -373,8 +381,6 @@
 	    nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
 	    nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) ||
 	    nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) ||
-	    nla_put_u16(skb, LWTUNNEL_IP6_SPORT, tun_info->key.tp_src) ||
-	    nla_put_u16(skb, LWTUNNEL_IP6_DPORT, tun_info->key.tp_dst) ||
 	    nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
 		return -ENOMEM;
 
@@ -388,8 +394,6 @@
 		+ nla_total_size(16)	/* LWTUNNEL_IP6_SRC */
 		+ nla_total_size(1)	/* LWTUNNEL_IP6_HOPLIMIT */
 		+ nla_total_size(1)	/* LWTUNNEL_IP6_TC */
-		+ nla_total_size(2)	/* LWTUNNEL_IP6_SPORT */
-		+ nla_total_size(2)	/* LWTUNNEL_IP6_DPORT */
 		+ nla_total_size(2);	/* LWTUNNEL_IP6_FLAGS */
 }
 
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 3b87ec51..4d8f0b6 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -197,7 +197,7 @@
 	skb_dst_set(skb, dst);
 	skb->dev = skb_dst(skb)->dev;
 
-	err = dst_output(skb->sk, skb);
+	err = dst_output(tunnel->net, skb->sk, skb);
 	if (net_xmit_eval(err) == 0)
 		err = skb->len;
 	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index cfcb996..fc42525 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1689,7 +1689,7 @@
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
 
-	return dst_output(sk, skb);
+	return dst_output(net, sk, skb);
 }
 
 /*
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 61eafc9..c3776ff 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -17,9 +17,8 @@
 #include <net/netfilter/nf_queue.h>
 
 /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
-int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type)
+int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_type)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
 	const struct iphdr *iph = ip_hdr(skb);
 	struct rtable *rt;
 	struct flowi4 fl4 = {};
@@ -104,7 +103,7 @@
 	}
 }
 
-static int nf_ip_reroute(struct sk_buff *skb,
+static int nf_ip_reroute(struct net *net, struct sk_buff *skb,
 			 const struct nf_queue_entry *entry)
 {
 	const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
@@ -116,7 +115,7 @@
 		      skb->mark == rt_info->mark &&
 		      iph->daddr == rt_info->daddr &&
 		      iph->saddr == rt_info->saddr))
-			return ip_route_me_harder(skb, RTN_UNSPEC);
+			return ip_route_me_harder(net, skb, RTN_UNSPEC);
 	}
 	return 0;
 }
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 8f87fc3..2dad3e1 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -247,10 +247,10 @@
 }
 
 unsigned int arpt_do_table(struct sk_buff *skb,
-			   unsigned int hook,
 			   const struct nf_hook_state *state,
 			   struct xt_table *table)
 {
+	unsigned int hook = state->hook;
 	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
 	unsigned int verdict = NF_DROP;
 	const struct arphdr *arp;
@@ -285,6 +285,7 @@
 	 */
 	e = get_entry(table_base, private->hook_entry[hook]);
 
+	acpar.net     = state->net;
 	acpar.in      = state->in;
 	acpar.out     = state->out;
 	acpar.hooknum = hook;
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index d217e4c..1897ee1 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -27,11 +27,10 @@
 
 /* The work comes in here from netfilter.c */
 static unsigned int
-arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+arptable_filter_hook(void *priv, struct sk_buff *skb,
 		     const struct nf_hook_state *state)
 {
-	return arpt_do_table(skb, ops->hooknum, state,
-			     state->net->ipv4.arptable_filter);
+	return arpt_do_table(skb, state, state->net->ipv4.arptable_filter);
 }
 
 static struct nf_hook_ops *arpfilter_ops __read_mostly;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 5d514ea..42d0946 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -285,10 +285,10 @@
 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
 unsigned int
 ipt_do_table(struct sk_buff *skb,
-	     unsigned int hook,
 	     const struct nf_hook_state *state,
 	     struct xt_table *table)
 {
+	unsigned int hook = state->hook;
 	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
 	const struct iphdr *ip;
 	/* Initializing verdict to NF_DROP keeps gcc happy. */
@@ -315,6 +315,7 @@
 	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
 	acpar.thoff   = ip_hdrlen(skb);
 	acpar.hotdrop = false;
+	acpar.net     = state->net;
 	acpar.in      = state->in;
 	acpar.out     = state->out;
 	acpar.family  = NFPROTO_IPV4;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 69157d8..3f32c03 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -507,7 +507,7 @@
 #endif
 
 static unsigned int
-arp_mangle(const struct nf_hook_ops *ops,
+arp_mangle(void *priv,
 	   struct sk_buff *skb,
 	   const struct nf_hook_state *state)
 {
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 87907d4..1d16c0f 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -59,7 +59,7 @@
 		nf_send_unreach(skb, ICMP_PKT_FILTERED, hook);
 		break;
 	case IPT_TCP_RESET:
-		nf_send_reset(skb, hook);
+		nf_send_reset(par->net, skb, hook);
 	case IPT_ICMP_ECHOREPLY:
 		/* Doesn't happen. */
 		break;
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index f471a06..f1a8df8 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -39,11 +39,14 @@
 }
 
 static void
-synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+synproxy_send_tcp(const struct synproxy_net *snet,
+		  const struct sk_buff *skb, struct sk_buff *nskb,
 		  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
 		  struct iphdr *niph, struct tcphdr *nth,
 		  unsigned int tcp_hdr_size)
 {
+	struct net *net = nf_ct_net(snet->tmpl);
+
 	nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0);
 	nskb->ip_summed   = CHECKSUM_PARTIAL;
 	nskb->csum_start  = (unsigned char *)nth - nskb->head;
@@ -51,7 +54,7 @@
 
 	skb_dst_set_noref(nskb, skb_dst(skb));
 	nskb->protocol = htons(ETH_P_IP);
-	if (ip_route_me_harder(nskb, RTN_UNSPEC))
+	if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
 		goto free_nskb;
 
 	if (nfct) {
@@ -60,7 +63,7 @@
 		nf_conntrack_get(nfct);
 	}
 
-	ip_local_out(nskb);
+	ip_local_out(net, nskb->sk, nskb);
 	return;
 
 free_nskb:
@@ -68,7 +71,8 @@
 }
 
 static void
-synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+synproxy_send_client_synack(const struct synproxy_net *snet,
+			    const struct sk_buff *skb, const struct tcphdr *th,
 			    const struct synproxy_options *opts)
 {
 	struct sk_buff *nskb;
@@ -104,7 +108,7 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
 			  niph, nth, tcp_hdr_size);
 }
 
@@ -148,7 +152,7 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+	synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
 			  niph, nth, tcp_hdr_size);
 }
 
@@ -188,7 +192,7 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
 }
 
 static void
@@ -226,7 +230,7 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
 	                  niph, nth, tcp_hdr_size);
 }
 
@@ -258,7 +262,7 @@
 synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_synproxy_info *info = par->targinfo;
-	struct synproxy_net *snet = synproxy_pernet(dev_net(par->in));
+	struct synproxy_net *snet = synproxy_pernet(par->net);
 	struct synproxy_options opts = {};
 	struct tcphdr *th, _th;
 
@@ -287,7 +291,7 @@
 					  XT_SYNPROXY_OPT_SACK_PERM |
 					  XT_SYNPROXY_OPT_ECN);
 
-		synproxy_send_client_synack(skb, th, &opts);
+		synproxy_send_client_synack(snet, skb, th, &opts);
 		return NF_DROP;
 
 	} else if (th->ack && !(th->fin || th->rst || th->syn)) {
@@ -299,7 +303,7 @@
 	return XT_CONTINUE;
 }
 
-static unsigned int ipv4_synproxy_hook(const struct nf_hook_ops *ops,
+static unsigned int ipv4_synproxy_hook(void *priv,
 				       struct sk_buff *skb,
 				       const struct nf_hook_state *nhs)
 {
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 8618fd1..74dd667 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -32,12 +32,11 @@
 	return addr;
 }
 
-static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
+static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4,
 				const struct net_device *dev, u8 flags)
 {
 	struct fib_result res;
 	bool dev_match;
-	struct net *net = dev_net(dev);
 	int ret __maybe_unused;
 
 	if (fib_lookup(net, fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE))
@@ -98,7 +97,7 @@
 	flow.flowi4_tos = RT_TOS(iph->tos);
 	flow.flowi4_scope = RT_SCOPE_UNIVERSE;
 
-	return rpfilter_lookup_reverse(&flow, par->in, info->flags) ^ invert;
+	return rpfilter_lookup_reverse(par->net, &flow, par->in, info->flags) ^ invert;
 }
 
 static int rpfilter_check(const struct xt_mtchk_param *par)
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 32feff3..397ef2d 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -33,17 +33,16 @@
 };
 
 static unsigned int
-iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+iptable_filter_hook(void *priv, struct sk_buff *skb,
 		    const struct nf_hook_state *state)
 {
-	if (ops->hooknum == NF_INET_LOCAL_OUT &&
+	if (state->hook == NF_INET_LOCAL_OUT &&
 	    (skb->len < sizeof(struct iphdr) ||
 	     ip_hdrlen(skb) < sizeof(struct iphdr)))
 		/* root is playing with raw sockets. */
 		return NF_ACCEPT;
 
-	return ipt_do_table(skb, ops->hooknum, state,
-			    state->net->ipv4.iptable_filter);
+	return ipt_do_table(skb, state, state->net->ipv4.iptable_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 4a5150f..ba5d392 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -58,8 +58,7 @@
 	daddr = iph->daddr;
 	tos = iph->tos;
 
-	ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, state,
-			   state->net->ipv4.iptable_mangle);
+	ret = ipt_do_table(skb, state, state->net->ipv4.iptable_mangle);
 	/* Reroute for ANY change. */
 	if (ret != NF_DROP && ret != NF_STOLEN) {
 		iph = ip_hdr(skb);
@@ -68,7 +67,7 @@
 		    iph->daddr != daddr ||
 		    skb->mark != mark ||
 		    iph->tos != tos) {
-			err = ip_route_me_harder(skb, RTN_UNSPEC);
+			err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
 			if (err < 0)
 				ret = NF_DROP_ERR(err);
 		}
@@ -79,18 +78,17 @@
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-iptable_mangle_hook(const struct nf_hook_ops *ops,
+iptable_mangle_hook(void *priv,
 		     struct sk_buff *skb,
 		     const struct nf_hook_state *state)
 {
-	if (ops->hooknum == NF_INET_LOCAL_OUT)
+	if (state->hook == NF_INET_LOCAL_OUT)
 		return ipt_mangle_out(skb, state);
-	if (ops->hooknum == NF_INET_POST_ROUTING)
-		return ipt_do_table(skb, ops->hooknum, state,
+	if (state->hook == NF_INET_POST_ROUTING)
+		return ipt_do_table(skb, state,
 				    state->net->ipv4.iptable_mangle);
 	/* PREROUTING/INPUT/FORWARD: */
-	return ipt_do_table(skb, ops->hooknum, state,
-			    state->net->ipv4.iptable_mangle);
+	return ipt_do_table(skb, state, state->net->ipv4.iptable_mangle);
 }
 
 static struct nf_hook_ops *mangle_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 4f4c64f..3a2e4d8 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -28,41 +28,40 @@
 	.af		= NFPROTO_IPV4,
 };
 
-static unsigned int iptable_nat_do_chain(const struct nf_hook_ops *ops,
+static unsigned int iptable_nat_do_chain(void *priv,
 					 struct sk_buff *skb,
 					 const struct nf_hook_state *state,
 					 struct nf_conn *ct)
 {
-	return ipt_do_table(skb, ops->hooknum, state,
-			    state->net->ipv4.nat_table);
+	return ipt_do_table(skb, state, state->net->ipv4.nat_table);
 }
 
-static unsigned int iptable_nat_ipv4_fn(const struct nf_hook_ops *ops,
+static unsigned int iptable_nat_ipv4_fn(void *priv,
 					struct sk_buff *skb,
 					const struct nf_hook_state *state)
 {
-	return nf_nat_ipv4_fn(ops, skb, state, iptable_nat_do_chain);
+	return nf_nat_ipv4_fn(priv, skb, state, iptable_nat_do_chain);
 }
 
-static unsigned int iptable_nat_ipv4_in(const struct nf_hook_ops *ops,
+static unsigned int iptable_nat_ipv4_in(void *priv,
 					struct sk_buff *skb,
 					const struct nf_hook_state *state)
 {
-	return nf_nat_ipv4_in(ops, skb, state, iptable_nat_do_chain);
+	return nf_nat_ipv4_in(priv, skb, state, iptable_nat_do_chain);
 }
 
-static unsigned int iptable_nat_ipv4_out(const struct nf_hook_ops *ops,
+static unsigned int iptable_nat_ipv4_out(void *priv,
 					 struct sk_buff *skb,
 					 const struct nf_hook_state *state)
 {
-	return nf_nat_ipv4_out(ops, skb, state, iptable_nat_do_chain);
+	return nf_nat_ipv4_out(priv, skb, state, iptable_nat_do_chain);
 }
 
-static unsigned int iptable_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
+static unsigned int iptable_nat_ipv4_local_fn(void *priv,
 					      struct sk_buff *skb,
 					      const struct nf_hook_state *state)
 {
-	return nf_nat_ipv4_local_fn(ops, skb, state, iptable_nat_do_chain);
+	return nf_nat_ipv4_local_fn(priv, skb, state, iptable_nat_do_chain);
 }
 
 static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 20126e4..1ba0281 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -20,17 +20,16 @@
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+iptable_raw_hook(void *priv, struct sk_buff *skb,
 		 const struct nf_hook_state *state)
 {
-	if (ops->hooknum == NF_INET_LOCAL_OUT &&
+	if (state->hook == NF_INET_LOCAL_OUT &&
 	    (skb->len < sizeof(struct iphdr) ||
 	     ip_hdrlen(skb) < sizeof(struct iphdr)))
 		/* root is playing with raw sockets. */
 		return NF_ACCEPT;
 
-	return ipt_do_table(skb, ops->hooknum, state,
-			    state->net->ipv4.iptable_raw);
+	return ipt_do_table(skb, state, state->net->ipv4.iptable_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index 82fefd6..f534e2f 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -37,17 +37,16 @@
 };
 
 static unsigned int
-iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+iptable_security_hook(void *priv, struct sk_buff *skb,
 		      const struct nf_hook_state *state)
 {
-	if (ops->hooknum == NF_INET_LOCAL_OUT &&
+	if (state->hook == NF_INET_LOCAL_OUT &&
 	    (skb->len < sizeof(struct iphdr) ||
 	     ip_hdrlen(skb) < sizeof(struct iphdr)))
 		/* Somebody is playing with raw sockets. */
 		return NF_ACCEPT;
 
-	return ipt_do_table(skb, ops->hooknum, state,
-			    state->net->ipv4.iptable_security);
+	return ipt_do_table(skb, state, state->net->ipv4.iptable_security);
 }
 
 static struct nf_hook_ops *sectbl_ops __read_mostly;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 9564684..752fb40 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -92,7 +92,7 @@
 	return NF_ACCEPT;
 }
 
-static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
+static unsigned int ipv4_helper(void *priv,
 				struct sk_buff *skb,
 				const struct nf_hook_state *state)
 {
@@ -119,7 +119,7 @@
 			    ct, ctinfo);
 }
 
-static unsigned int ipv4_confirm(const struct nf_hook_ops *ops,
+static unsigned int ipv4_confirm(void *priv,
 				 struct sk_buff *skb,
 				 const struct nf_hook_state *state)
 {
@@ -143,14 +143,14 @@
 	return nf_conntrack_confirm(skb);
 }
 
-static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops,
+static unsigned int ipv4_conntrack_in(void *priv,
 				      struct sk_buff *skb,
 				      const struct nf_hook_state *state)
 {
-	return nf_conntrack_in(state->net, PF_INET, ops->hooknum, skb);
+	return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
 }
 
-static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops,
+static unsigned int ipv4_conntrack_local(void *priv,
 					 struct sk_buff *skb,
 					 const struct nf_hook_state *state)
 {
@@ -158,7 +158,7 @@
 	if (skb->len < sizeof(struct iphdr) ||
 	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	return nf_conntrack_in(state->net, PF_INET, ops->hooknum, skb);
+	return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
 }
 
 /* Connection tracking may drop packets, but never alters them, so
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index cdde3ec..c567e1b 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -30,7 +30,7 @@
 }
 
 static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
-			      struct nf_conntrack_tuple *tuple)
+			      struct net *net, struct nf_conntrack_tuple *tuple)
 {
 	const struct icmphdr *hp;
 	struct icmphdr _hdr;
@@ -144,7 +144,7 @@
 	if (!nf_ct_get_tuplepr(skb,
 			       skb_network_offset(skb) + ip_hdrlen(skb)
 						       + sizeof(struct icmphdr),
-			       PF_INET, &origtuple)) {
+			       PF_INET, net, &origtuple)) {
 		pr_debug("icmp_error_message: failed to get tuple\n");
 		return -NF_ACCEPT;
 	}
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 9306ec4..bf25f45 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -22,14 +22,15 @@
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
 
-static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
+static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
+				   u_int32_t user)
 {
 	int err;
 
 	skb_orphan(skb);
 
 	local_bh_disable();
-	err = ip_defrag(skb, user);
+	err = ip_defrag(net, skb, user);
 	local_bh_enable();
 
 	if (!err) {
@@ -61,7 +62,7 @@
 		return IP_DEFRAG_CONNTRACK_OUT + zone_id;
 }
 
-static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
+static unsigned int ipv4_conntrack_defrag(void *priv,
 					  struct sk_buff *skb,
 					  const struct nf_hook_state *state)
 {
@@ -83,9 +84,9 @@
 	/* Gather fragments. */
 	if (ip_is_fragment(ip_hdr(skb))) {
 		enum ip_defrag_users user =
-			nf_ct_defrag_user(ops->hooknum, skb);
+			nf_ct_defrag_user(state->hook, skb);
 
-		if (nf_ct_ipv4_gather_frags(skb, user))
+		if (nf_ct_ipv4_gather_frags(state->net, skb, user))
 			return NF_STOLEN;
 	}
 	return NF_ACCEPT;
diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c
index 2d79e6e..ceb1873 100644
--- a/net/ipv4/netfilter/nf_dup_ipv4.c
+++ b/net/ipv4/netfilter/nf_dup_ipv4.c
@@ -23,25 +23,10 @@
 #include <net/netfilter/nf_conntrack.h>
 #endif
 
-static struct net *pick_net(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_NS
-	const struct dst_entry *dst;
-
-	if (skb->dev != NULL)
-		return dev_net(skb->dev);
-	dst = skb_dst(skb);
-	if (dst != NULL && dst->dev != NULL)
-		return dev_net(dst->dev);
-#endif
-	return &init_net;
-}
-
-static bool nf_dup_ipv4_route(struct sk_buff *skb, const struct in_addr *gw,
-			      int oif)
+static bool nf_dup_ipv4_route(struct net *net, struct sk_buff *skb,
+			      const struct in_addr *gw, int oif)
 {
 	const struct iphdr *iph = ip_hdr(skb);
-	struct net *net = pick_net(skb);
 	struct rtable *rt;
 	struct flowi4 fl4;
 
@@ -65,7 +50,7 @@
 	return true;
 }
 
-void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
 		 const struct in_addr *gw, int oif)
 {
 	struct iphdr *iph;
@@ -105,9 +90,9 @@
 		--iph->ttl;
 	ip_send_check(iph);
 
-	if (nf_dup_ipv4_route(skb, gw, oif)) {
+	if (nf_dup_ipv4_route(net, skb, gw, oif)) {
 		__this_cpu_write(nf_skb_duplicated, true);
-		ip_local_out(skb);
+		ip_local_out(net, skb->sk, skb);
 		__this_cpu_write(nf_skb_duplicated, false);
 	} else {
 		kfree_skb(skb);
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index 22f4579..5075b7e 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -255,9 +255,9 @@
 EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
 
 unsigned int
-nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
 	       const struct nf_hook_state *state,
-	       unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+	       unsigned int (*do_chain)(void *priv,
 					struct sk_buff *skb,
 					const struct nf_hook_state *state,
 					struct nf_conn *ct))
@@ -266,7 +266,7 @@
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn_nat *nat;
 	/* maniptype == SRC for postrouting. */
-	enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
+	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
 
 	/* We never see fragments: conntrack defrags on pre-routing
 	 * and local-out, and nf_nat_out protects post-routing.
@@ -295,7 +295,7 @@
 	case IP_CT_RELATED_REPLY:
 		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
 			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
-							   ops->hooknum))
+							   state->hook))
 				return NF_DROP;
 			else
 				return NF_ACCEPT;
@@ -308,21 +308,21 @@
 		if (!nf_nat_initialized(ct, maniptype)) {
 			unsigned int ret;
 
-			ret = do_chain(ops, skb, state, ct);
+			ret = do_chain(priv, skb, state, ct);
 			if (ret != NF_ACCEPT)
 				return ret;
 
-			if (nf_nat_initialized(ct, HOOK2MANIP(ops->hooknum)))
+			if (nf_nat_initialized(ct, HOOK2MANIP(state->hook)))
 				break;
 
-			ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
+			ret = nf_nat_alloc_null_binding(ct, state->hook);
 			if (ret != NF_ACCEPT)
 				return ret;
 		} else {
 			pr_debug("Already setup manip %s for ct %p\n",
 				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
 				 ct);
-			if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat,
+			if (nf_nat_oif_changed(state->hook, ctinfo, nat,
 					       state->out))
 				goto oif_changed;
 		}
@@ -332,11 +332,11 @@
 		/* ESTABLISHED */
 		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
 			     ctinfo == IP_CT_ESTABLISHED_REPLY);
-		if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
+		if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
 			goto oif_changed;
 	}
 
-	return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+	return nf_nat_packet(ct, ctinfo, state->hook, skb);
 
 oif_changed:
 	nf_ct_kill_acct(ct, ctinfo, skb);
@@ -345,9 +345,9 @@
 EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn);
 
 unsigned int
-nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
 	       const struct nf_hook_state *state,
-	       unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+	       unsigned int (*do_chain)(void *priv,
 					 struct sk_buff *skb,
 					 const struct nf_hook_state *state,
 					 struct nf_conn *ct))
@@ -355,7 +355,7 @@
 	unsigned int ret;
 	__be32 daddr = ip_hdr(skb)->daddr;
 
-	ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
+	ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    daddr != ip_hdr(skb)->daddr)
 		skb_dst_drop(skb);
@@ -365,9 +365,9 @@
 EXPORT_SYMBOL_GPL(nf_nat_ipv4_in);
 
 unsigned int
-nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
 		const struct nf_hook_state *state,
-		unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+		unsigned int (*do_chain)(void *priv,
 					  struct sk_buff *skb,
 					  const struct nf_hook_state *state,
 					  struct nf_conn *ct))
@@ -384,7 +384,7 @@
 	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
 
-	ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
+	ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
 #ifdef CONFIG_XFRM
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
@@ -396,7 +396,7 @@
 		    (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
 		     ct->tuplehash[dir].tuple.src.u.all !=
 		     ct->tuplehash[!dir].tuple.dst.u.all)) {
-			err = nf_xfrm_me_harder(skb, AF_INET);
+			err = nf_xfrm_me_harder(state->net, skb, AF_INET);
 			if (err < 0)
 				ret = NF_DROP_ERR(err);
 		}
@@ -407,9 +407,9 @@
 EXPORT_SYMBOL_GPL(nf_nat_ipv4_out);
 
 unsigned int
-nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
 		     const struct nf_hook_state *state,
-		     unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+		     unsigned int (*do_chain)(void *priv,
 					       struct sk_buff *skb,
 					       const struct nf_hook_state *state,
 					       struct nf_conn *ct))
@@ -424,14 +424,14 @@
 	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
 
-	ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
+	ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
 		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
 
 		if (ct->tuplehash[dir].tuple.dst.u3.ip !=
 		    ct->tuplehash[!dir].tuple.src.u3.ip) {
-			err = ip_route_me_harder(skb, RTN_UNSPEC);
+			err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
 			if (err < 0)
 				ret = NF_DROP_ERR(err);
 		}
@@ -440,7 +440,7 @@
 			 ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
 			 ct->tuplehash[dir].tuple.dst.u.all !=
 			 ct->tuplehash[!dir].tuple.src.u.all) {
-			err = nf_xfrm_me_harder(skb, AF_INET);
+			err = nf_xfrm_me_harder(state->net, skb, AF_INET);
 			if (err < 0)
 				ret = NF_DROP_ERR(err);
 		}
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 3262e41..c747b2d 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -99,7 +99,7 @@
 EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
 
 /* Send RST reply */
-void nf_send_reset(struct sk_buff *oldskb, int hook)
+void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
 {
 	struct sk_buff *nskb;
 	const struct iphdr *oiph;
@@ -129,7 +129,7 @@
 				   ip4_dst_hoplimit(skb_dst(nskb)));
 	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
 
-	if (ip_route_me_harder(nskb, RTN_UNSPEC))
+	if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
 		goto free_nskb;
 
 	/* "Never happens" */
@@ -157,7 +157,7 @@
 		dev_queue_xmit(nskb);
 	} else
 #endif
-		ip_local_out(nskb);
+		ip_local_out(net, nskb->sk, nskb);
 
 	return;
 
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
index 8412268..9d09d4f 100644
--- a/net/ipv4/netfilter/nf_tables_arp.c
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -15,15 +15,15 @@
 #include <net/netfilter/nf_tables.h>
 
 static unsigned int
-nft_do_chain_arp(const struct nf_hook_ops *ops,
+nft_do_chain_arp(void *priv,
 		  struct sk_buff *skb,
 		  const struct nf_hook_state *state)
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo(&pkt, ops, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
 
-	return nft_do_chain(&pkt, ops);
+	return nft_do_chain(&pkt, priv);
 }
 
 static struct nft_af_info nft_af_arp __read_mostly = {
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c
index aa180d3..ca9dc3c 100644
--- a/net/ipv4/netfilter/nf_tables_ipv4.c
+++ b/net/ipv4/netfilter/nf_tables_ipv4.c
@@ -18,18 +18,18 @@
 #include <net/ip.h>
 #include <net/netfilter/nf_tables_ipv4.h>
 
-static unsigned int nft_do_chain_ipv4(const struct nf_hook_ops *ops,
+static unsigned int nft_do_chain_ipv4(void *priv,
 				      struct sk_buff *skb,
 				      const struct nf_hook_state *state)
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
+	nft_set_pktinfo_ipv4(&pkt, skb, state);
 
-	return nft_do_chain(&pkt, ops);
+	return nft_do_chain(&pkt, priv);
 }
 
-static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
+static unsigned int nft_ipv4_output(void *priv,
 				    struct sk_buff *skb,
 				    const struct nf_hook_state *state)
 {
@@ -41,7 +41,7 @@
 		return NF_ACCEPT;
 	}
 
-	return nft_do_chain_ipv4(ops, skb, state);
+	return nft_do_chain_ipv4(priv, skb, state);
 }
 
 struct nft_af_info nft_af_ipv4 __read_mostly = {
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
index bf5c30a..f5c66a7 100644
--- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -26,44 +26,44 @@
 #include <net/netfilter/nf_nat_l3proto.h>
 #include <net/ip.h>
 
-static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops,
+static unsigned int nft_nat_do_chain(void *priv,
 				      struct sk_buff *skb,
 				      const struct nf_hook_state *state,
 				      struct nf_conn *ct)
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
+	nft_set_pktinfo_ipv4(&pkt, skb, state);
 
-	return nft_do_chain(&pkt, ops);
+	return nft_do_chain(&pkt, priv);
 }
 
-static unsigned int nft_nat_ipv4_fn(const struct nf_hook_ops *ops,
+static unsigned int nft_nat_ipv4_fn(void *priv,
 				    struct sk_buff *skb,
 				    const struct nf_hook_state *state)
 {
-	return nf_nat_ipv4_fn(ops, skb, state, nft_nat_do_chain);
+	return nf_nat_ipv4_fn(priv, skb, state, nft_nat_do_chain);
 }
 
-static unsigned int nft_nat_ipv4_in(const struct nf_hook_ops *ops,
+static unsigned int nft_nat_ipv4_in(void *priv,
 				    struct sk_buff *skb,
 				    const struct nf_hook_state *state)
 {
-	return nf_nat_ipv4_in(ops, skb, state, nft_nat_do_chain);
+	return nf_nat_ipv4_in(priv, skb, state, nft_nat_do_chain);
 }
 
-static unsigned int nft_nat_ipv4_out(const struct nf_hook_ops *ops,
+static unsigned int nft_nat_ipv4_out(void *priv,
 				     struct sk_buff *skb,
 				     const struct nf_hook_state *state)
 {
-	return nf_nat_ipv4_out(ops, skb, state, nft_nat_do_chain);
+	return nf_nat_ipv4_out(priv, skb, state, nft_nat_do_chain);
 }
 
-static unsigned int nft_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
+static unsigned int nft_nat_ipv4_local_fn(void *priv,
 					  struct sk_buff *skb,
 					  const struct nf_hook_state *state)
 {
-	return nf_nat_ipv4_local_fn(ops, skb, state, nft_nat_do_chain);
+	return nf_nat_ipv4_local_fn(priv, skb, state, nft_nat_do_chain);
 }
 
 static const struct nf_chain_type nft_chain_nat_ipv4 = {
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
index e335b0a..2375b0a 100644
--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -21,7 +21,7 @@
 #include <net/route.h>
 #include <net/ip.h>
 
-static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
+static unsigned int nf_route_table_hook(void *priv,
 					struct sk_buff *skb,
 					const struct nf_hook_state *state)
 {
@@ -37,7 +37,7 @@
 	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
 
-	nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
+	nft_set_pktinfo_ipv4(&pkt, skb, state);
 
 	mark = skb->mark;
 	iph = ip_hdr(skb);
@@ -45,7 +45,7 @@
 	daddr = iph->daddr;
 	tos = iph->tos;
 
-	ret = nft_do_chain(&pkt, ops);
+	ret = nft_do_chain(&pkt, priv);
 	if (ret != NF_DROP && ret != NF_QUEUE) {
 		iph = ip_hdr(skb);
 
@@ -53,7 +53,7 @@
 		    iph->daddr != daddr ||
 		    skb->mark != mark ||
 		    iph->tos != tos)
-			if (ip_route_me_harder(skb, RTN_UNSPEC))
+			if (ip_route_me_harder(state->net, skb, RTN_UNSPEC))
 				ret = NF_DROP;
 	}
 	return ret;
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
index b45932d..bf855e6 100644
--- a/net/ipv4/netfilter/nft_dup_ipv4.c
+++ b/net/ipv4/netfilter/nft_dup_ipv4.c
@@ -30,7 +30,7 @@
 	};
 	int oif = regs->data[priv->sreg_dev];
 
-	nf_dup_ipv4(pkt->skb, pkt->ops->hooknum, &gw, oif);
+	nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif);
 }
 
 static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index 40e414c..b72ffc5 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -26,7 +26,7 @@
 	memset(&range, 0, sizeof(range));
 	range.flags = priv->flags;
 
-	regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum,
+	regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, pkt->hook,
 						    &range, pkt->out);
 }
 
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
index d8d795d..c09d438 100644
--- a/net/ipv4/netfilter/nft_redir_ipv4.c
+++ b/net/ipv4/netfilter/nft_redir_ipv4.c
@@ -36,7 +36,7 @@
 	mr.range[0].flags |= priv->flags;
 
 	regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &mr,
-						  pkt->ops->hooknum);
+						  pkt->hook);
 }
 
 static struct nft_expr_type nft_redir_ipv4_type;
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index b07e58b..c24f41c 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -27,11 +27,10 @@
 
 	switch (priv->type) {
 	case NFT_REJECT_ICMP_UNREACH:
-		nf_send_unreach(pkt->skb, priv->icmp_code,
-				pkt->ops->hooknum);
+		nf_send_unreach(pkt->skb, priv->icmp_code, pkt->hook);
 		break;
 	case NFT_REJECT_TCP_RST:
-		nf_send_reset(pkt->skb, pkt->ops->hooknum);
+		nf_send_reset(pkt->net, pkt->skb, pkt->hook);
 		break;
 	default:
 		break;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 28ef8a9..8c0d0bd 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -413,7 +413,7 @@
 
 	err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
 		      net, sk, skb, NULL, rt->dst.dev,
-		      dst_output_okfn);
+		      dst_output);
 	if (err > 0)
 		err = net_xmit_errno(err);
 	if (err)
@@ -484,6 +484,7 @@
 static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
 	struct inet_sock *inet = inet_sk(sk);
+	struct net *net = sock_net(sk);
 	struct ipcm_cookie ipc;
 	struct rtable *rt = NULL;
 	struct flowi4 fl4;
@@ -543,7 +544,7 @@
 	ipc.oif = sk->sk_bound_dev_if;
 
 	if (msg->msg_controllen) {
-		err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
+		err = ip_cmsg_send(net, msg, &ipc, false);
 		if (err)
 			goto out;
 		if (ipc.opt)
@@ -598,6 +599,9 @@
 			    (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
 			   daddr, saddr, 0, 0);
 
+	if (!saddr && ipc.oif)
+		l3mdev_get_saddr(net, ipc.oif, &fl4);
+
 	if (!inet->hdrincl) {
 		rfv.msg = msg;
 		rfv.hlen = 0;
@@ -608,7 +612,7 @@
 	}
 
 	security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
-	rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+	rt = ip_route_output_flow(net, &fl4, sk);
 	if (IS_ERR(rt)) {
 		err = PTR_ERR(rt);
 		rt = NULL;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 80f7c5b..85f184e 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -112,7 +112,7 @@
 #endif
 #include <net/secure_seq.h>
 #include <net/ip_tunnels.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 
 #define RT_FL_TOS(oldflp4) \
 	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -847,7 +847,7 @@
 		return;
 	}
 	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
-	vif = vrf_master_ifindex_rcu(rt->dst.dev);
+	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
 	rcu_read_unlock();
 
 	net = dev_net(rt->dst.dev);
@@ -941,7 +941,7 @@
 	}
 
 	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
-			       vrf_master_ifindex(skb->dev), 1);
+			       l3mdev_master_ifindex(skb->dev), 1);
 
 	send = true;
 	if (peer) {
@@ -1152,7 +1152,7 @@
 		dst_set_expires(&rt->dst, 0);
 }
 
-static int ip_rt_bug(struct sock *sk, struct sk_buff *skb)
+static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	pr_debug("%s: %pI4 -> %pI4, %s\n",
 		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
@@ -1487,9 +1487,8 @@
 	    skb->protocol != htons(ETH_P_IP))
 		goto e_inval;
 
-	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
-		if (ipv4_is_loopback(saddr))
-			goto e_inval;
+	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
+		goto e_inval;
 
 	if (ipv4_is_zeronet(saddr)) {
 		if (!ipv4_is_local_multicast(daddr))
@@ -1652,6 +1651,48 @@
 	return err;
 }
 
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+
+/* To make ICMP packets follow the right flow, the multipath hash is
+ * calculated from the inner IP addresses in reverse order.
+ */
+static int ip_multipath_icmp_hash(struct sk_buff *skb)
+{
+	const struct iphdr *outer_iph = ip_hdr(skb);
+	struct icmphdr _icmph;
+	const struct icmphdr *icmph;
+	struct iphdr _inner_iph;
+	const struct iphdr *inner_iph;
+
+	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
+		goto standard_hash;
+
+	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
+				   &_icmph);
+	if (!icmph)
+		goto standard_hash;
+
+	if (icmph->type != ICMP_DEST_UNREACH &&
+	    icmph->type != ICMP_REDIRECT &&
+	    icmph->type != ICMP_TIME_EXCEEDED &&
+	    icmph->type != ICMP_PARAMETERPROB) {
+		goto standard_hash;
+	}
+
+	inner_iph = skb_header_pointer(skb,
+				       outer_iph->ihl * 4 + sizeof(_icmph),
+				       sizeof(_inner_iph), &_inner_iph);
+	if (!inner_iph)
+		goto standard_hash;
+
+	return fib_multipath_hash(inner_iph->daddr, inner_iph->saddr);
+
+standard_hash:
+	return fib_multipath_hash(outer_iph->saddr, outer_iph->daddr);
+}
+
+#endif /* CONFIG_IP_ROUTE_MULTIPATH */
+
 static int ip_mkroute_input(struct sk_buff *skb,
 			    struct fib_result *res,
 			    const struct flowi4 *fl4,
@@ -1659,8 +1700,15 @@
 			    __be32 daddr, __be32 saddr, u32 tos)
 {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-	if (res->fi && res->fi->fib_nhs > 1)
-		fib_select_multipath(res);
+	if (res->fi && res->fi->fib_nhs > 1) {
+		int h;
+
+		if (unlikely(ip_hdr(skb)->protocol == IPPROTO_ICMP))
+			h = ip_multipath_icmp_hash(skb);
+		else
+			h = fib_multipath_hash(saddr, daddr);
+		fib_select_multipath(res, h);
+	}
 #endif
 
 	/* create a routing cache entry */
@@ -1740,10 +1788,11 @@
 	 *	Now we are ready to route packet.
 	 */
 	fl4.flowi4_oif = 0;
-	fl4.flowi4_iif = vrf_master_ifindex_rcu(dev) ? : dev->ifindex;
+	fl4.flowi4_iif = l3mdev_fib_oif_rcu(dev);
 	fl4.flowi4_mark = skb->mark;
 	fl4.flowi4_tos = tos;
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+	fl4.flowi4_flags = 0;
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
 	err = fib_lookup(net, &fl4, &res, 0);
@@ -1760,7 +1809,7 @@
 		err = fib_validate_source(skb, saddr, daddr, tos,
 					  0, dev, in_dev, &itag);
 		if (err < 0)
-			goto martian_source_keep_err;
+			goto martian_source;
 		goto local_input;
 	}
 
@@ -1782,7 +1831,7 @@
 		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
 					  in_dev, &itag);
 		if (err < 0)
-			goto martian_source_keep_err;
+			goto martian_source;
 	}
 	flags |= RTCF_BROADCAST;
 	res.type = RTN_BROADCAST;
@@ -1858,8 +1907,6 @@
 	goto out;
 
 martian_source:
-	err = -EINVAL;
-martian_source_keep_err:
 	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
 	goto out;
 }
@@ -2028,7 +2075,8 @@
  * Major route resolver routine.
  */
 
-struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
+struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
+					  int mp_hash)
 {
 	struct net_device *dev_out = NULL;
 	__u8 tos = RT_FL_TOS(fl4);
@@ -2036,6 +2084,7 @@
 	struct fib_result res;
 	struct rtable *rth;
 	int orig_oif;
+	int err = -ENETUNREACH;
 
 	res.tclassid	= 0;
 	res.fi		= NULL;
@@ -2126,11 +2175,10 @@
 				fl4->saddr = inet_select_addr(dev_out, 0,
 							      RT_SCOPE_HOST);
 		}
-		if (netif_is_vrf(dev_out) &&
-		    !(fl4->flowi4_flags & FLOWI_FLAG_VRFSRC)) {
-			rth = vrf_dev_get_rth(dev_out);
+
+		rth = l3mdev_get_rtable(dev_out, fl4);
+		if (rth)
 			goto out;
-		}
 	}
 
 	if (!fl4->daddr) {
@@ -2144,10 +2192,12 @@
 		goto make_route;
 	}
 
-	if (fib_lookup(net, fl4, &res, 0)) {
+	err = fib_lookup(net, fl4, &res, 0);
+	if (err) {
 		res.fi = NULL;
 		res.table = NULL;
-		if (fl4->flowi4_oif) {
+		if (fl4->flowi4_oif &&
+		    !netif_index_is_l3_master(net, fl4->flowi4_oif)) {
 			/* Apparently, routing tables are wrong. Assume,
 			   that the destination is on link.
 
@@ -2172,7 +2222,7 @@
 			res.type = RTN_UNICAST;
 			goto make_route;
 		}
-		rth = ERR_PTR(-ENETUNREACH);
+		rth = ERR_PTR(err);
 		goto out;
 	}
 
@@ -2189,18 +2239,7 @@
 		goto make_route;
 	}
 
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-	if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
-		fib_select_multipath(&res);
-	else
-#endif
-	if (!res.prefixlen &&
-	    res.table->tb_num_default > 1 &&
-	    res.type == RTN_UNICAST && !fl4->flowi4_oif)
-		fib_select_default(fl4, &res);
-
-	if (!fl4->saddr)
-		fl4->saddr = FIB_RES_PREFSRC(net, res);
+	fib_select_path(net, &res, fl4, mp_hash);
 
 	dev_out = FIB_RES_DEV(res);
 	fl4->flowi4_oif = dev_out->ifindex;
@@ -2213,7 +2252,7 @@
 	rcu_read_unlock();
 	return rth;
 }
-EXPORT_SYMBOL_GPL(__ip_route_output_key);
+EXPORT_SYMBOL_GPL(__ip_route_output_key_hash);
 
 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
 {
@@ -2265,7 +2304,7 @@
 
 		new->__use = 1;
 		new->input = dst_discard;
-		new->output = dst_discard_sk;
+		new->output = dst_discard_out;
 
 		new->dev = ort->dst.dev;
 		if (new->dev)
@@ -2291,7 +2330,7 @@
 }
 
 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
-				    struct sock *sk)
+				    const struct sock *sk)
 {
 	struct rtable *rt = __ip_route_output_key(net, flp4);
 
@@ -2469,6 +2508,9 @@
 	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
 	fl4.flowi4_mark = mark;
 
+	if (netif_index_is_l3_master(net, fl4.flowi4_oif))
+		fl4.flowi4_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF;
+
 	if (iif) {
 		struct net_device *dev;
 
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index d70b1f6..4c0892b 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -192,15 +192,11 @@
 }
 EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
 
-__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
-			      __u16 *mssp)
+__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mssp)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
 
-	tcp_synq_overflow(sk);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
-
 	return __cookie_v4_init_sequence(iph, th, mssp);
 }
 
@@ -229,6 +225,7 @@
 	child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
 	if (child) {
 		atomic_set(&req->rsk_refcnt, 1);
+		sock_rps_save_rxhash(child, skb);
 		inet_csk_reqsk_queue_add(sk, req, child);
 	} else {
 		reqsk_free(req);
@@ -288,6 +285,10 @@
 }
 EXPORT_SYMBOL(cookie_ecn_ok);
 
+/* On input, sk is a listener.
+ * Output is listener if incoming packet would not create a child
+ *           NULL if memory could not be allocated.
+ */
 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 {
 	struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
@@ -326,7 +327,7 @@
 		goto out;
 
 	ret = NULL;
-	req = inet_reqsk_alloc(&tcp_request_sock_ops, sk); /* for safety */
+	req = inet_reqsk_alloc(&tcp_request_sock_ops, sk, false); /* for safety */
 	if (!req)
 		goto out;
 
@@ -345,7 +346,7 @@
 	ireq->wscale_ok		= tcp_opt.wscale_ok;
 	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
 	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-	treq->snt_synack	= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
+	treq->snt_synack.v64	= 0;
 	treq->tfo_listener	= false;
 
 	ireq->ir_iif = sk->sk_bound_dev_if;
@@ -381,10 +382,10 @@
 	}
 
 	/* Try to redo what tcp_v4_send_synack did. */
-	req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
+	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
 
 	tcp_select_initial_window(tcp_full_space(sk), req->mss,
-				  &req->rcv_wnd, &req->window_clamp,
+				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
 				  ireq->wscale_ok, &rcv_wscale,
 				  dst_metric(&rt->dst, RTAX_INITRWND));
 
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 894da3a..30a531c 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -818,6 +818,13 @@
 		.proc_handler	= proc_dointvec
 	},
 	{
+		.procname	= "icmp_redirects_use_orig_daddr",
+		.data		= &init_net.ipv4.sysctl_icmp_redirects_use_orig_daddr,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+	{
 		.procname	= "icmp_ratelimit",
 		.data		= &init_net.ipv4.sysctl_icmp_ratelimit,
 		.maxlen		= sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b8b8fa1..ac1bdbb 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -900,7 +900,8 @@
 	 */
 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
 	    !tcp_passive_fastopen(sk)) {
-		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
+		err = sk_stream_wait_connect(sk, &timeo);
+		if (err != 0)
 			goto out_err;
 	}
 
@@ -967,7 +968,8 @@
 
 		copied += copy;
 		offset += copy;
-		if (!(size -= copy)) {
+		size -= copy;
+		if (!size) {
 			tcp_tx_timestamp(sk, skb);
 			goto out;
 		}
@@ -988,7 +990,8 @@
 		tcp_push(sk, flags & ~MSG_MORE, mss_now,
 			 TCP_NAGLE_PUSH, size_goal);
 
-		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+		err = sk_stream_wait_memory(sk, &timeo);
+		if (err != 0)
 			goto do_error;
 
 		mss_now = tcp_send_mss(sk, &size_goal, flags);
@@ -1111,7 +1114,8 @@
 	 */
 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
 	    !tcp_passive_fastopen(sk)) {
-		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
+		err = sk_stream_wait_connect(sk, &timeo);
+		if (err != 0)
 			goto do_error;
 	}
 
@@ -1267,7 +1271,8 @@
 			tcp_push(sk, flags & ~MSG_MORE, mss_now,
 				 TCP_NAGLE_PUSH, size_goal);
 
-		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+		err = sk_stream_wait_memory(sk, &timeo);
+		if (err != 0)
 			goto do_error;
 
 		mss_now = tcp_send_mss(sk, &size_goal, flags);
@@ -1767,7 +1772,8 @@
 
 			/* __ Restore normal policy in scheduler __ */
 
-			if ((chunk = len - tp->ucopy.len) != 0) {
+			chunk = len - tp->ucopy.len;
+			if (chunk != 0) {
 				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
 				len -= chunk;
 				copied += chunk;
@@ -1778,7 +1784,8 @@
 do_prequeue:
 				tcp_prequeue_process(sk);
 
-				if ((chunk = len - tp->ucopy.len) != 0) {
+				chunk = len - tp->ucopy.len;
+				if (chunk != 0) {
 					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 					len -= chunk;
 					copied += chunk;
@@ -2230,7 +2237,8 @@
 	sk->sk_shutdown = 0;
 	sock_reset_flag(sk, SOCK_DONE);
 	tp->srtt_us = 0;
-	if ((tp->write_seq += tp->max_window + 2) == 0)
+	tp->write_seq += tp->max_window + 2;
+	if (tp->write_seq == 0)
 		tp->write_seq = 1;
 	icsk->icsk_backoff = 0;
 	tp->snd_cwnd = 2;
@@ -2253,13 +2261,6 @@
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
-void tcp_sock_destruct(struct sock *sk)
-{
-	inet_sock_destruct(sk);
-
-	kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
-}
-
 static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
 	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
@@ -2581,7 +2582,7 @@
 		    TCPF_LISTEN))) {
 			tcp_fastopen_init_key_once(true);
 
-			err = fastopen_init_queue(sk, val);
+			fastopen_queue_tune(sk, val);
 		} else {
 			err = -EINVAL;
 		}
@@ -2849,10 +2850,7 @@
 		break;
 
 	case TCP_FASTOPEN:
-		if (icsk->icsk_accept_queue.fastopenq)
-			val = icsk->icsk_accept_queue.fastopenq->max_qlen;
-		else
-			val = 0;
+		val = icsk->icsk_accept_queue.fastopenq.max_qlen;
 		break;
 
 	case TCP_TIMESTAMP:
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 93c4dc3..882caa4 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -173,6 +173,10 @@
 	 */
 	if (ca->get_info)
 		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+	if (ca->flags & TCP_CONG_NEEDS_ECN)
+		INET_ECN_xmit(sk);
+	else
+		INET_ECN_dontxmit(sk);
 }
 
 void tcp_init_congestion_control(struct sock *sk)
@@ -181,6 +185,10 @@
 
 	if (icsk->icsk_ca_ops->init)
 		icsk->icsk_ca_ops->init(sk);
+	if (tcp_ca_needs_ecn(sk))
+		INET_ECN_xmit(sk);
+	else
+		INET_ECN_dontxmit(sk);
 }
 
 static void tcp_reinit_congestion_control(struct sock *sk,
@@ -192,8 +200,8 @@
 	icsk->icsk_ca_ops = ca;
 	icsk->icsk_ca_setsockopt = 1;
 
-	if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
-		icsk->icsk_ca_ops->init(sk);
+	if (sk->sk_state != TCP_CLOSE)
+		tcp_init_congestion_control(sk);
 }
 
 /* Manage refcounts on socket close. */
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index c6ded6b..448c261 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -154,14 +154,20 @@
 static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 {
 	if (event == CA_EVENT_TX_START) {
-		s32 delta = tcp_time_stamp - tcp_sk(sk)->lsndtime;
 		struct bictcp *ca = inet_csk_ca(sk);
+		u32 now = tcp_time_stamp;
+		s32 delta;
+
+		delta = now - tcp_sk(sk)->lsndtime;
 
 		/* We were application limited (idle) for a while.
 		 * Shift epoch_start to keep cwnd growth to cubic curve.
 		 */
-		if (ca->epoch_start && delta > 0)
+		if (ca->epoch_start && delta > 0) {
 			ca->epoch_start += delta;
+			if (after(ca->epoch_start, now))
+				ca->epoch_start = now;
+		}
 		return;
 	}
 }
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index f9c0fb8..93396bf 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -124,10 +124,10 @@
 	return false;
 }
 
-static bool tcp_fastopen_create_child(struct sock *sk,
-				      struct sk_buff *skb,
-				      struct dst_entry *dst,
-				      struct request_sock *req)
+static struct sock *tcp_fastopen_create_child(struct sock *sk,
+					      struct sk_buff *skb,
+					      struct dst_entry *dst,
+					      struct request_sock *req)
 {
 	struct tcp_sock *tp;
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
@@ -140,11 +140,11 @@
 
 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
 	if (!child)
-		return false;
+		return NULL;
 
-	spin_lock(&queue->fastopenq->lock);
-	queue->fastopenq->qlen++;
-	spin_unlock(&queue->fastopenq->lock);
+	spin_lock(&queue->fastopenq.lock);
+	queue->fastopenq.qlen++;
+	spin_unlock(&queue->fastopenq.lock);
 
 	/* Initialize the child socket. Have to fix some values to take
 	 * into account the child is a Fast Open socket and is created
@@ -161,15 +161,13 @@
 	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
 
 	/* Activate the retrans timer so that SYNACK can be retransmitted.
-	 * The request socket is not added to the SYN table of the parent
+	 * The request socket is not added to the ehash
 	 * because it's been added to the accept queue directly.
 	 */
 	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
 				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);
 
-	atomic_set(&req->rsk_refcnt, 1);
-	/* Add the child socket directly into the accept queue */
-	inet_csk_reqsk_queue_add(sk, req, child);
+	atomic_set(&req->rsk_refcnt, 2);
 
 	/* Now finish processing the fastopen child socket. */
 	inet_csk(child)->icsk_af_ops->rebuild_header(child);
@@ -178,12 +176,10 @@
 	tcp_init_metrics(child);
 	tcp_init_buffer_space(child);
 
-	/* Queue the data carried in the SYN packet. We need to first
-	 * bump skb's refcnt because the caller will attempt to free it.
-	 * Note that IPv6 might also have used skb_get() trick
-	 * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts)
-	 * So we need to eventually get a clone of the packet,
-	 * before inserting it in sk_receive_queue.
+	/* Queue the data carried in the SYN packet.
+	 * We used to play tricky games with skb_get().
+	 * With lockless listener, it is a dead end.
+	 * Do not think about it.
 	 *
 	 * XXX (TFO) - we honor a zero-payload TFO request for now,
 	 * (any reason not to?) but no need to queue the skb since
@@ -191,12 +187,7 @@
 	 */
 	end_seq = TCP_SKB_CB(skb)->end_seq;
 	if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
-		struct sk_buff *skb2;
-
-		if (unlikely(skb_shared(skb)))
-			skb2 = skb_clone(skb, GFP_ATOMIC);
-		else
-			skb2 = skb_get(skb);
+		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
 		if (likely(skb2)) {
 			skb_dst_drop(skb2);
@@ -214,11 +205,10 @@
 		}
 	}
 	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
-	sk->sk_data_ready(sk);
-	bh_unlock_sock(child);
-	sock_put(child);
-	WARN_ON(!req->sk);
-	return true;
+	/* tcp_conn_request() is sending the SYNACK,
+	 * and queues the child into listener accept queue.
+	 */
+	return child;
 }
 
 static bool tcp_fastopen_queue_check(struct sock *sk)
@@ -235,8 +225,8 @@
 	 * between qlen overflow causing Fast Open to be disabled
 	 * temporarily vs a server not supporting Fast Open at all.
 	 */
-	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
-	if (!fastopenq || fastopenq->max_qlen == 0)
+	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
+	if (fastopenq->max_qlen == 0)
 		return false;
 
 	if (fastopenq->qlen >= fastopenq->max_qlen) {
@@ -261,13 +251,14 @@
  * may be updated and return the client in the SYN-ACK later. E.g., Fast Open
  * cookie request (foc->len == 0).
  */
-bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
-		      struct request_sock *req,
-		      struct tcp_fastopen_cookie *foc,
-		      struct dst_entry *dst)
+struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+			      struct request_sock *req,
+			      struct tcp_fastopen_cookie *foc,
+			      struct dst_entry *dst)
 {
 	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
 	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
+	struct sock *child;
 
 	if (foc->len == 0) /* Client requests a cookie */
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
@@ -276,7 +267,7 @@
 	      (syn_data || foc->len >= 0) &&
 	      tcp_fastopen_queue_check(sk))) {
 		foc->len = -1;
-		return false;
+		return NULL;
 	}
 
 	if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
@@ -296,11 +287,12 @@
 		 * data in SYN_RECV state.
 		 */
 fastopen:
-		if (tcp_fastopen_create_child(sk, skb, dst, req)) {
+		child = tcp_fastopen_create_child(sk, skb, dst, req);
+		if (child) {
 			foc->len = -1;
 			NET_INC_STATS_BH(sock_net(sk),
 					 LINUX_MIB_TCPFASTOPENPASSIVE);
-			return true;
+			return child;
 		}
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 	} else if (foc->len > 0) /* Client presents an invalid cookie */
@@ -308,6 +300,5 @@
 
 	valid_foc.exp = foc->exp;
 	*foc = valid_foc;
-	return false;
+	return NULL;
 }
-EXPORT_SYMBOL(tcp_try_fastopen);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a62e9c7..3b35c3f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2953,21 +2953,21 @@
 }
 
 /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
-static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
+void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	long seq_rtt_us = -1L;
+	long rtt_us = -1L;
 
-	if (synack_stamp && !tp->total_retrans)
-		seq_rtt_us = jiffies_to_usecs(tcp_time_stamp - synack_stamp);
+	if (req && !req->num_retrans && tcp_rsk(req)->snt_synack.v64) {
+		struct skb_mstamp now;
 
-	/* If the ACK acks both the SYNACK and the (Fast Open'd) data packets
-	 * sent in SYN_RECV, SYNACK RTT is the smooth RTT computed in tcp_ack()
-	 */
-	if (!tp->srtt_us)
-		tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L);
+		skb_mstamp_get(&now);
+		rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack);
+	}
+
+	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L);
 }
 
+
 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -5472,7 +5472,7 @@
 }
 
 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
-					 const struct tcphdr *th, unsigned int len)
+					 const struct tcphdr *th)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -5698,15 +5698,14 @@
  *	address independent.
  */
 
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-			  const struct tcphdr *th, unsigned int len)
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct tcphdr *th = tcp_hdr(skb);
 	struct request_sock *req;
 	int queued = 0;
 	bool acceptable;
-	u32 synack_stamp;
 
 	tp->rx_opt.saw_tstamp = 0;
 
@@ -5750,7 +5749,7 @@
 		goto discard;
 
 	case TCP_SYN_SENT:
-		queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
+		queued = tcp_rcv_synsent_state_process(sk, skb, th);
 		if (queued >= 0)
 			return queued;
 
@@ -5785,15 +5784,16 @@
 		if (!acceptable)
 			return 1;
 
+		if (!tp->srtt_us)
+			tcp_synack_rtt_meas(sk, req);
+
 		/* Once we leave TCP_SYN_RECV, we no longer need req
 		 * so release it.
 		 */
 		if (req) {
-			synack_stamp = tcp_rsk(req)->snt_synack;
 			tp->total_retrans = req->num_retrans;
 			reqsk_fastopen_remove(sk, req, false);
 		} else {
-			synack_stamp = tp->lsndtime;
 			/* Make sure socket is routed, for correct metrics. */
 			icsk->icsk_af_ops->rebuild_header(sk);
 			tcp_init_congestion_control(sk);
@@ -5816,7 +5816,6 @@
 		tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
 		tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
 		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-		tcp_synack_rtt_meas(sk, synack_stamp);
 
 		if (tp->rx_opt.tstamp_ok)
 			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
@@ -6023,11 +6022,11 @@
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 
-	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
+	req->rsk_rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
 	req->cookie_ts = 0;
 	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
 	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
-	tcp_rsk(req)->snt_synack = tcp_time_stamp;
+	skb_mstamp_get(&tcp_rsk(req)->snt_synack);
 	tcp_rsk(req)->last_oow_ack_time = 0;
 	req->mss = rx_opt->mss_clamp;
 	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
@@ -6043,9 +6042,11 @@
 }
 
 struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
-				      struct sock *sk_listener)
+				      struct sock *sk_listener,
+				      bool attach_listener)
 {
-	struct request_sock *req = reqsk_alloc(ops, sk_listener);
+	struct request_sock *req = reqsk_alloc(ops, sk_listener,
+					       attach_listener);
 
 	if (req) {
 		struct inet_request_sock *ireq = inet_rsk(req);
@@ -6065,13 +6066,13 @@
 /*
  * Return true if a syncookie should be sent
  */
-static bool tcp_syn_flood_action(struct sock *sk,
+static bool tcp_syn_flood_action(const struct sock *sk,
 				 const struct sk_buff *skb,
 				 const char *proto)
 {
+	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 	const char *msg = "Dropping request";
 	bool want_cookie = false;
-	struct listen_sock *lopt;
 
 #ifdef CONFIG_SYN_COOKIES
 	if (sysctl_tcp_syncookies) {
@@ -6082,12 +6083,12 @@
 #endif
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
-	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
-	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
-		lopt->synflood_warned = 1;
+	if (!queue->synflood_warned &&
+	    sysctl_tcp_syncookies != 2 &&
+	    xchg(&queue->synflood_warned, 1) == 0)
 		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
 			proto, ntohs(tcp_hdr(skb)->dest), msg);
-	}
+
 	return want_cookie;
 }
 
@@ -6112,16 +6113,15 @@
 		     const struct tcp_request_sock_ops *af_ops,
 		     struct sock *sk, struct sk_buff *skb)
 {
-	struct tcp_options_received tmp_opt;
-	struct request_sock *req;
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = NULL;
-	__u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn;
-	bool want_cookie = false, fastopen;
-	struct flowi fl;
 	struct tcp_fastopen_cookie foc = { .len = -1 };
-	int err;
-
+	__u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn;
+	struct tcp_options_received tmp_opt;
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sock *fastopen_sk = NULL;
+	struct dst_entry *dst = NULL;
+	struct request_sock *req;
+	bool want_cookie = false;
+	struct flowi fl;
 
 	/* TW buckets are converted to open requests without
 	 * limitations, they conserve resources and peer is
@@ -6145,7 +6145,7 @@
 		goto drop;
 	}
 
-	req = inet_reqsk_alloc(rsk_ops, sk);
+	req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie);
 	if (!req)
 		goto drop;
 
@@ -6230,19 +6230,28 @@
 	tcp_rsk(req)->snt_isn = isn;
 	tcp_rsk(req)->txhash = net_tx_rndhash();
 	tcp_openreq_init_rwin(req, sk, dst);
-	fastopen = !want_cookie &&
-		   tcp_try_fastopen(sk, skb, req, &foc, dst);
-	err = af_ops->send_synack(sk, dst, &fl, req,
-				  skb_get_queue_mapping(skb), &foc);
-	if (!fastopen) {
-		if (err || want_cookie)
-			goto drop_and_free;
-
-		tcp_rsk(req)->tfo_listener = false;
-		af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+	if (!want_cookie) {
+		tcp_reqsk_record_syn(sk, req, skb);
+		fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
 	}
-	tcp_reqsk_record_syn(sk, req, skb);
-
+	if (fastopen_sk) {
+		af_ops->send_synack(fastopen_sk, dst, &fl, req,
+				    skb_get_queue_mapping(skb), &foc, false);
+		/* Add the child socket directly into the accept queue */
+		inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
+		sk->sk_data_ready(sk);
+		bh_unlock_sock(fastopen_sk);
+		sock_put(fastopen_sk);
+	} else {
+		tcp_rsk(req)->tfo_listener = false;
+		if (!want_cookie)
+			inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+		af_ops->send_synack(sk, dst, &fl, req,
+				    skb_get_queue_mapping(skb), &foc, !want_cookie);
+		if (want_cookie)
+			goto drop_and_free;
+	}
+	reqsk_put(req);
 	return 0;
 
 drop_and_release:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d671d74..ddb1983 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -576,7 +576,7 @@
  *	Exception: precedence violation. We do not implement it in any case.
  */
 
-static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
+static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	struct {
@@ -795,7 +795,7 @@
 	inet_twsk_put(tw);
 }
 
-static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
@@ -803,7 +803,7 @@
 	 */
 	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
-			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
 			tcp_time_stamp,
 			req->ts_recent,
 			0,
@@ -818,11 +818,12 @@
  *	This still operates on a request_sock only, not on a big
  *	socket.
  */
-static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
+static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 			      struct flowi *fl,
 			      struct request_sock *req,
 			      u16 queue_mapping,
-			      struct tcp_fastopen_cookie *foc)
+			      struct tcp_fastopen_cookie *foc,
+				  bool attach_req)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct flowi4 fl4;
@@ -833,7 +834,7 @@
 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 		return -1;
 
-	skb = tcp_make_synack(sk, dst, req, foc);
+	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
 
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
@@ -865,7 +866,7 @@
  */
 
 /* Find the Key structure for an address.  */
-struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
+struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
 					 const union tcp_md5_addr *addr,
 					 int family)
 {
@@ -877,7 +878,7 @@
 	/* caller either holds rcu_read_lock() or socket lock */
 	md5sig = rcu_dereference_check(tp->md5sig_info,
 				       sock_owned_by_user(sk) ||
-				       lockdep_is_held(&sk->sk_lock.slock));
+				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
 	if (!md5sig)
 		return NULL;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -894,7 +895,7 @@
 }
 EXPORT_SYMBOL(tcp_md5_do_lookup);
 
-struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
 					 const struct sock *addr_sk)
 {
 	const union tcp_md5_addr *addr;
@@ -1112,10 +1113,13 @@
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
+#endif
+
 /* Called with rcu_read_lock() */
-static bool tcp_v4_inbound_md5_hash(struct sock *sk,
+static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
 				    const struct sk_buff *skb)
 {
+#ifdef CONFIG_TCP_MD5SIG
 	/*
 	 * This gets called for each TCP segment that arrives
 	 * so we want to be efficient.
@@ -1165,10 +1169,12 @@
 		return true;
 	}
 	return false;
-}
 #endif
+	return false;
+}
 
-static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
+static void tcp_v4_init_req(struct request_sock *req,
+			    const struct sock *sk_listener,
 			    struct sk_buff *skb)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
@@ -1179,7 +1185,8 @@
 	ireq->opt = tcp_v4_save_options(skb);
 }
 
-static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
+static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
+					  struct flowi *fl,
 					  const struct request_sock *req,
 					  bool *strict)
 {
@@ -1218,7 +1225,6 @@
 	.route_req	=	tcp_v4_route_req,
 	.init_seq	=	tcp_v4_init_sequence,
 	.send_synack	=	tcp_v4_send_synack,
-	.queue_hash_add =	inet_csk_reqsk_queue_hash_add,
 };
 
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
@@ -1241,7 +1247,7 @@
  * The three way handshake has completed - we got a valid synack -
  * now create the new socket.
  */
-struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req,
 				  struct dst_entry *dst)
 {
@@ -1276,7 +1282,6 @@
 	newinet->mc_index     = inet_iif(skb);
 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
 	newinet->rcv_tos      = ip_hdr(skb)->tos;
-	newsk->sk_txhash      = tcp_rsk(req)->txhash;
 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
 	if (inet_opt)
 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
@@ -1338,34 +1343,11 @@
 }
 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
 
-static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
+static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
 {
-	const struct tcphdr *th = tcp_hdr(skb);
-	const struct iphdr *iph = ip_hdr(skb);
-	struct request_sock *req;
-	struct sock *nsk;
-
-	req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
-	if (req) {
-		nsk = tcp_check_req(sk, skb, req, false);
-		if (!nsk || nsk == sk)
-			reqsk_put(req);
-		return nsk;
-	}
-
-	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
-			th->source, iph->daddr, th->dest, inet_iif(skb));
-
-	if (nsk) {
-		if (nsk->sk_state != TCP_TIME_WAIT) {
-			bh_lock_sock(nsk);
-			return nsk;
-		}
-		inet_twsk_put(inet_twsk(nsk));
-		return NULL;
-	}
-
 #ifdef CONFIG_SYN_COOKIES
+	const struct tcphdr *th = tcp_hdr(skb);
+
 	if (!th->syn)
 		sk = cookie_v4_check(sk, skb);
 #endif
@@ -1373,7 +1355,7 @@
 }
 
 /* The socket must have its spinlock held when we get
- * here.
+ * here, unless it is a TCP_LISTEN socket.
  *
  * We have a potential double-lock case here, so even when
  * doing backlog processing we use the BH locking scheme.
@@ -1404,13 +1386,13 @@
 		goto csum_err;
 
 	if (sk->sk_state == TCP_LISTEN) {
-		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
+		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
+
 		if (!nsk)
 			goto discard;
-
 		if (nsk != sk) {
 			sock_rps_save_rxhash(nsk, skb);
-			sk_mark_napi_id(sk, skb);
+			sk_mark_napi_id(nsk, skb);
 			if (tcp_child_process(sk, nsk, skb)) {
 				rsk = nsk;
 				goto reset;
@@ -1420,7 +1402,7 @@
 	} else
 		sock_rps_save_rxhash(sk, skb);
 
-	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
+	if (tcp_rcv_state_process(sk, skb)) {
 		rsk = sk;
 		goto reset;
 	}
@@ -1598,6 +1580,29 @@
 	if (sk->sk_state == TCP_TIME_WAIT)
 		goto do_time_wait;
 
+	if (sk->sk_state == TCP_NEW_SYN_RECV) {
+		struct request_sock *req = inet_reqsk(sk);
+		struct sock *nsk = NULL;
+
+		sk = req->rsk_listener;
+		if (tcp_v4_inbound_md5_hash(sk, skb))
+			goto discard_and_relse;
+		if (sk->sk_state == TCP_LISTEN)
+			nsk = tcp_check_req(sk, skb, req, false);
+		if (!nsk) {
+			reqsk_put(req);
+			goto discard_it;
+		}
+		if (nsk == sk) {
+			sock_hold(sk);
+			reqsk_put(req);
+		} else if (tcp_child_process(sk, nsk, skb)) {
+			tcp_v4_send_reset(nsk, skb);
+			goto discard_it;
+		} else {
+			return 0;
+		}
+	}
 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
@@ -1606,25 +1611,23 @@
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 
-#ifdef CONFIG_TCP_MD5SIG
-	/*
-	 * We really want to reject the packet as early as possible
-	 * if:
-	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
-	 *  o There is an MD5 option and we're not expecting one
-	 */
 	if (tcp_v4_inbound_md5_hash(sk, skb))
 		goto discard_and_relse;
-#endif
 
 	nf_reset(skb);
 
 	if (sk_filter(sk, skb))
 		goto discard_and_relse;
 
-	sk_incoming_cpu_update(sk);
 	skb->dev = NULL;
 
+	if (sk->sk_state == TCP_LISTEN) {
+		ret = tcp_v4_do_rcv(sk, skb);
+		goto put_and_return;
+	}
+
+	sk_incoming_cpu_update(sk);
+
 	bh_lock_sock_nested(sk);
 	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
 	ret = 0;
@@ -1639,6 +1642,7 @@
 	}
 	bh_unlock_sock(sk);
 
+put_and_return:
 	sock_put(sk);
 
 	return ret;
@@ -1833,35 +1837,7 @@
 	++st->num;
 	++st->offset;
 
-	if (st->state == TCP_SEQ_STATE_OPENREQ) {
-		struct request_sock *req = cur;
-
-		icsk = inet_csk(st->syn_wait_sk);
-		req = req->dl_next;
-		while (1) {
-			while (req) {
-				if (req->rsk_ops->family == st->family) {
-					cur = req;
-					goto out;
-				}
-				req = req->dl_next;
-			}
-			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
-				break;
-get_req:
-			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
-		}
-		sk	  = sk_nulls_next(st->syn_wait_sk);
-		st->state = TCP_SEQ_STATE_LISTENING;
-		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-	} else {
-		icsk = inet_csk(sk);
-		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-		if (reqsk_queue_len(&icsk->icsk_accept_queue))
-			goto start_req;
-		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-		sk = sk_nulls_next(sk);
-	}
+	sk = sk_nulls_next(sk);
 get_sk:
 	sk_nulls_for_each_from(sk, node) {
 		if (!net_eq(sock_net(sk), net))
@@ -1871,16 +1847,6 @@
 			goto out;
 		}
 		icsk = inet_csk(sk);
-		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
-start_req:
-			st->uid		= sock_i_uid(sk);
-			st->syn_wait_sk = sk;
-			st->state	= TCP_SEQ_STATE_OPENREQ;
-			st->sbucket	= 0;
-			goto get_req;
-		}
-		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 	}
 	spin_unlock_bh(&ilb->lock);
 	st->offset = 0;
@@ -2012,7 +1978,6 @@
 	void *rc = NULL;
 
 	switch (st->state) {
-	case TCP_SEQ_STATE_OPENREQ:
 	case TCP_SEQ_STATE_LISTENING:
 		if (st->bucket >= INET_LHTABLE_SIZE)
 			break;
@@ -2071,7 +2036,6 @@
 	}
 
 	switch (st->state) {
-	case TCP_SEQ_STATE_OPENREQ:
 	case TCP_SEQ_STATE_LISTENING:
 		rc = listening_get_next(seq, v);
 		if (!rc) {
@@ -2096,11 +2060,6 @@
 	struct tcp_iter_state *st = seq->private;
 
 	switch (st->state) {
-	case TCP_SEQ_STATE_OPENREQ:
-		if (v) {
-			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
-			spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-		}
 	case TCP_SEQ_STATE_LISTENING:
 		if (v != SEQ_START_TOKEN)
 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
@@ -2154,7 +2113,7 @@
 EXPORT_SYMBOL(tcp_proc_unregister);
 
 static void get_openreq4(const struct request_sock *req,
-			 struct seq_file *f, int i, kuid_t uid)
+			 struct seq_file *f, int i)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	long delta = req->rsk_timer.expires - jiffies;
@@ -2171,7 +2130,8 @@
 		1,    /* timers active (only the expire timer) */
 		jiffies_delta_to_clock_t(delta),
 		req->num_timeout,
-		from_kuid_munged(seq_user_ns(f), uid),
+		from_kuid_munged(seq_user_ns(f),
+				 sock_i_uid(req->rsk_listener)),
 		0,  /* non standard timer */
 		0, /* open_requests have no inode */
 		0,
@@ -2185,7 +2145,7 @@
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct inet_sock *inet = inet_sk(sk);
-	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
+	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
 	__be32 dest = inet->inet_daddr;
 	__be32 src = inet->inet_rcv_saddr;
 	__u16 destp = ntohs(inet->inet_dport);
@@ -2272,18 +2232,12 @@
 	}
 	st = seq->private;
 
-	switch (st->state) {
-	case TCP_SEQ_STATE_LISTENING:
-	case TCP_SEQ_STATE_ESTABLISHED:
-		if (sk->sk_state == TCP_TIME_WAIT)
-			get_timewait4_sock(v, seq, st->num);
-		else
-			get_tcp4_sock(v, seq, st->num);
-		break;
-	case TCP_SEQ_STATE_OPENREQ:
-		get_openreq4(v, seq, st->num, st->uid);
-		break;
-	}
+	if (sk->sk_state == TCP_TIME_WAIT)
+		get_timewait4_sock(v, seq, st->num);
+	else if (sk->sk_state == TCP_NEW_SYN_RECV)
+		get_openreq4(v, seq, st->num);
+	else
+		get_tcp4_sock(v, seq, st->num);
 out:
 	seq_pad(seq, '\n');
 	return 0;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6d8795b..41828bd 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -162,9 +162,9 @@
 		if (tcp_death_row.sysctl_tw_recycle &&
 		    tcptw->tw_ts_recent_stamp &&
 		    tcp_tw_remember_stamp(tw))
-			inet_twsk_schedule(tw, tw->tw_timeout);
+			inet_twsk_reschedule(tw, tw->tw_timeout);
 		else
-			inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 		return TCP_TW_ACK;
 	}
 
@@ -201,7 +201,7 @@
 				return TCP_TW_SUCCESS;
 			}
 		}
-		inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
 		if (tmp_opt.saw_tstamp) {
 			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
@@ -251,7 +251,7 @@
 		 * Do not reschedule in the last case.
 		 */
 		if (paws_reject || th->ack)
-			inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
 		return tcp_timewait_check_oow_rate_limit(
 			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
@@ -322,9 +322,6 @@
 		} while (0);
 #endif
 
-		/* Linkage updates. */
-		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
-
 		/* Get the TIME_WAIT timeout firing. */
 		if (timeo < rto)
 			timeo = rto;
@@ -338,6 +335,8 @@
 		}
 
 		inet_twsk_schedule(tw, timeo);
+		/* Linkage updates. */
+		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 		inet_twsk_put(tw);
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
@@ -362,30 +361,38 @@
 }
 EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
 
+/* Warning: This function is called without sk_listener being locked.
+ * Be sure to read socket fields once, as their values could change under us.
+ */
 void tcp_openreq_init_rwin(struct request_sock *req,
-			   struct sock *sk, struct dst_entry *dst)
+			   const struct sock *sk_listener,
+			   const struct dst_entry *dst)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
-	struct tcp_sock *tp = tcp_sk(sk);
-	__u8 rcv_wscale;
+	const struct tcp_sock *tp = tcp_sk(sk_listener);
+	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
+	int full_space = tcp_full_space(sk_listener);
 	int mss = dst_metric_advmss(dst);
+	u32 window_clamp;
+	__u8 rcv_wscale;
 
-	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
-		mss = tp->rx_opt.user_mss;
+	if (user_mss && user_mss < mss)
+		mss = user_mss;
 
+	window_clamp = READ_ONCE(tp->window_clamp);
 	/* Set this up on the first call only */
-	req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
+	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
 
 	/* limit the window selection if the user enforce a smaller rx buffer */
-	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
-	    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
-		req->window_clamp = tcp_full_space(sk);
+	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
+	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
+		req->rsk_window_clamp = full_space;
 
 	/* tcp_full_space because it is guaranteed to be the first packet */
-	tcp_select_initial_window(tcp_full_space(sk),
+	tcp_select_initial_window(full_space,
 		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
-		&req->rcv_wnd,
-		&req->window_clamp,
+		&req->rsk_rcv_wnd,
+		&req->rsk_window_clamp,
 		ireq->wscale_ok,
 		&rcv_wscale,
 		dst_metric(dst, RTAX_INITRWND));
@@ -434,7 +441,9 @@
  * Actually, we could lots of memory writes here. tp of listening
  * socket contains all necessary default parameters.
  */
-struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
+struct sock *tcp_create_openreq_child(const struct sock *sk,
+				      struct request_sock *req,
+				      struct sk_buff *skb)
 {
 	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
@@ -470,7 +479,8 @@
 		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 		tcp_enable_early_retrans(newtp);
 		newtp->tlp_high_seq = 0;
-		newtp->lsndtime = treq->snt_synack;
+		newtp->lsndtime = treq->snt_synack.stamp_jiffies;
+		newsk->sk_txhash = treq->txhash;
 		newtp->last_oow_ack_time = 0;
 		newtp->total_retrans = req->num_retrans;
 
@@ -502,9 +512,9 @@
 			if (sysctl_tcp_fack)
 				tcp_enable_fack(newtp);
 		}
-		newtp->window_clamp = req->window_clamp;
-		newtp->rcv_ssthresh = req->rcv_wnd;
-		newtp->rcv_wnd = req->rcv_wnd;
+		newtp->window_clamp = req->rsk_window_clamp;
+		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
+		newtp->rcv_wnd = req->rsk_rcv_wnd;
 		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
 		if (newtp->rx_opt.wscale_ok) {
 			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
@@ -568,8 +578,6 @@
 	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
 	bool paws_reject = false;
 
-	BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));
-
 	tmp_opt.saw_tstamp = 0;
 	if (th->doff > (sizeof(struct tcphdr)>>2)) {
 		tcp_parse_options(skb, &tmp_opt, 0, NULL);
@@ -699,7 +707,7 @@
 	/* RFC793: "first check sequence number". */
 
 	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
+					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
 		/* Out of window: send ACK and drop. */
 		if (!(flg & TCP_FLAG_RST))
 			req->rsk_ops->send_ack(sk, skb, req);
@@ -760,6 +768,8 @@
 	if (!child)
 		goto listen_overflow;
 
+	sock_rps_save_rxhash(child, skb);
+	tcp_synack_rtt_meas(child, req);
 	inet_csk_reqsk_queue_drop(sk, req);
 	inet_csk_reqsk_queue_add(sk, req, child);
 	/* Warning: caller must not call reqsk_put(req);
@@ -812,8 +822,7 @@
 	int state = child->sk_state;
 
 	if (!sock_owned_by_user(child)) {
-		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
-					    skb->len);
+		ret = tcp_rcv_state_process(child, skb);
 		/* Wakeup parent, send SIGIO */
 		if (state == TCP_SYN_RECV && child->sk_state != state)
 			parent->sk_data_ready(parent);
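
The tcp_openreq_init_rwin() change above snapshots the listener's fields with READ_ONCE() because the listener is no longer locked while SYN packets are processed. A stand-alone sketch of the same "read the racy field once into a local" idiom, with a user-space approximation of READ_ONCE() (the struct and values below are made up):

/* Sketch of the "read a racy field once" pattern used above. */
#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

struct listener {
	uint32_t window_clamp;	/* may be rewritten concurrently by setsockopt() */
};

static uint32_t pick_window_clamp(const struct listener *l, uint32_t dflt)
{
	/* One load, kept in a local: every later use sees the same value
	 * even if another CPU rewrites l->window_clamp meanwhile.
	 */
	uint32_t clamp = READ_ONCE(l->window_clamp);

	return clamp ? clamp : dflt;
}

int main(void)
{
	struct listener l = { .window_clamp = 0 };

	printf("clamp=%u\n", pick_window_clamp(&l, 65535));
	return 0;
}
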
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d0ad355..6e79fcb 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -357,14 +357,10 @@
 }
 
 static void
-tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th,
-		    struct sock *sk)
+tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
 {
-	if (inet_rsk(req)->ecn_ok) {
+	if (inet_rsk(req)->ecn_ok)
 		th->ece = 1;
-		if (tcp_ca_needs_ecn(sk))
-			INET_ECN_xmit(sk);
-	}
 }
 
 /* Set up ECN state for a packet on a ESTABLISHED socket that is about to
@@ -612,12 +608,11 @@
 }
 
 /* Set up TCP options for SYN-ACKs. */
-static unsigned int tcp_synack_options(struct sock *sk,
-				   struct request_sock *req,
-				   unsigned int mss, struct sk_buff *skb,
-				   struct tcp_out_options *opts,
-				   const struct tcp_md5sig_key *md5,
-				   struct tcp_fastopen_cookie *foc)
+static unsigned int tcp_synack_options(struct request_sock *req,
+				       unsigned int mss, struct sk_buff *skb,
+				       struct tcp_out_options *opts,
+				       const struct tcp_md5sig_key *md5,
+				       struct tcp_fastopen_cookie *foc)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -1827,7 +1822,7 @@
 
 	/* Ok, it looks like it is advisable to defer. */
 
-	if (cong_win < send_win && cong_win < skb->len)
+	if (cong_win < send_win && cong_win <= skb->len)
 		*is_cwnd_limited = true;
 
 	return true;
@@ -2060,7 +2055,6 @@
 
 		cwnd_quota = tcp_cwnd_test(tp, skb);
 		if (!cwnd_quota) {
-			is_cwnd_limited = true;
 			if (push_one == 2)
 				/* Force out a loss probe pkt. */
 				cwnd_quota = 1;
@@ -2142,6 +2136,7 @@
 		/* Send one loss probe per tail loss episode. */
 		if (push_one != 2)
 			tcp_schedule_loss_probe(sk);
+		is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
 		tcp_cwnd_validate(sk, is_cwnd_limited);
 		return false;
 	}
@@ -2165,7 +2160,7 @@
 	/* Don't do any loss probe on a Fast Open connection before 3WHS
 	 * finishes.
 	 */
-	if (sk->sk_state == TCP_SYN_RECV)
+	if (tp->fastopen_rsk)
 		return false;
 
 	/* TLP is only scheduled when next timer event is RTO. */
@@ -2175,7 +2170,7 @@
 	/* Schedule a loss probe in 2*RTT for SACK capable connections
 	 * in Open state, that are either limited by cwnd or application.
 	 */
-	if (sysctl_tcp_early_retrans < 3 || !tp->srtt_us || !tp->packets_out ||
+	if (sysctl_tcp_early_retrans < 3 || !tp->packets_out ||
 	    !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
 		return false;
 
@@ -2184,9 +2179,10 @@
 		return false;
 
 	/* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
-	 * for delayed ack when there's one outstanding packet.
+	 * for delayed ack when there's one outstanding packet. If no RTT
+	 * sample is available then probe after TCP_TIMEOUT_INIT.
 	 */
-	timeout = rtt << 1;
+	timeout = rtt << 1 ? : TCP_TIMEOUT_INIT;
 	if (tp->packets_out == 1)
 		timeout = max_t(u32, timeout,
 				(rtt + (rtt >> 1) + TCP_DELACK_MAX));
@@ -2897,6 +2893,7 @@
 	skb_reserve(skb, MAX_TCP_HEADER);
 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
 			     TCPHDR_ACK | TCPHDR_RST);
+	skb_mstamp_get(&skb->skb_mstamp);
 	/* Send it off. */
 	if (tcp_transmit_skb(sk, skb, 0, priority))
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
@@ -2948,20 +2945,22 @@
  * Allocate one skb and build a SYNACK packet.
  * @dst is consumed : Caller should not use it again.
  */
-struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 				struct request_sock *req,
-				struct tcp_fastopen_cookie *foc)
+				struct tcp_fastopen_cookie *foc,
+				bool attach_req)
 {
-	struct tcp_out_options opts;
 	struct inet_request_sock *ireq = inet_rsk(req);
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct tcphdr *th;
-	struct sk_buff *skb;
+	const struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_md5sig_key *md5 = NULL;
+	struct tcp_out_options opts;
+	struct sk_buff *skb;
 	int tcp_header_size;
+	struct tcphdr *th;
+	u16 user_mss;
 	int mss;
 
-	skb = sock_wmalloc(sk, MAX_TCP_HEADER, 1, GFP_ATOMIC);
+	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
 	if (unlikely(!skb)) {
 		dst_release(dst);
 		return NULL;
@@ -2969,11 +2968,23 @@
 	/* Reserve space for headers. */
 	skb_reserve(skb, MAX_TCP_HEADER);
 
+	if (attach_req) {
+		skb->destructor = sock_edemux;
+		sock_hold(req_to_sk(req));
+		skb->sk = req_to_sk(req);
+	} else {
+		/* sk is a const pointer, because we want to express that multiple
+		 * cpus might call us concurrently.
+		 * sk->sk_wmem_alloc is an atomic, so we can promote it to rw.
+		 */
+		skb_set_owner_w(skb, (struct sock *)sk);
+	}
 	skb_dst_set(skb, dst);
 
 	mss = dst_metric_advmss(dst);
-	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
-		mss = tp->rx_opt.user_mss;
+	user_mss = READ_ONCE(tp->rx_opt.user_mss);
+	if (user_mss && user_mss < mss)
+		mss = user_mss;
 
 	memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
@@ -2988,8 +2999,8 @@
 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
 #endif
 	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
-	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
-					     foc) + sizeof(*th);
+	tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) +
+			  sizeof(*th);
 
 	skb_push(skb, tcp_header_size);
 	skb_reset_transport_header(skb);
@@ -2998,7 +3009,7 @@
 	memset(th, 0, sizeof(struct tcphdr));
 	th->syn = 1;
 	th->ack = 1;
-	tcp_ecn_make_synack(req, th, sk);
+	tcp_ecn_make_synack(req, th);
 	th->source = htons(ireq->ir_num);
 	th->dest = ireq->ir_rmt_port;
 	/* Setting of flags are superfluous here for callers (and ECE is
@@ -3012,8 +3023,8 @@
 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
 
 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
-	th->window = htons(min(req->rcv_wnd, 65535U));
-	tcp_options_write((__be32 *)(th + 1), tp, &opts);
+	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
+	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
 	th->doff = (tcp_header_size >> 2);
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
 
@@ -3500,14 +3511,14 @@
 				  TCP_RTO_MAX);
 }
 
-int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
+int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
 {
 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
 	struct flowi fl;
 	int res;
 
 	tcp_rsk(req)->txhash = net_tx_rndhash();
-	res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL);
+	res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL, true);
 	if (!res) {
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
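
tcp_schedule_loss_probe() above falls back with "timeout = rtt << 1 ? : TCP_TIMEOUT_INIT", the GNU C conditional with the middle operand omitted. A tiny stand-alone illustration of that fallback (the constant is a stand-in, gcc or clang required):

/* "x ? : y" evaluates x once and falls back to y when x is 0. */
#include <stdio.h>

#define FAKE_TIMEOUT_INIT 1000	/* stand-in for TCP_TIMEOUT_INIT */

static unsigned int probe_timeout(unsigned int rtt)
{
	/* 2*RTT when an RTT sample exists, otherwise the fixed fallback. */
	return (rtt << 1) ? : FAKE_TIMEOUT_INIT;
}

int main(void)
{
	printf("%u %u\n", probe_timeout(50), probe_timeout(0));	/* 100 1000 */
	return 0;
}
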
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 7149ebc..c9c716a 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -83,7 +83,7 @@
 }
 
 /* Calculate maximal number or retries on an orphaned socket. */
-static int tcp_orphan_retries(struct sock *sk, int alive)
+static int tcp_orphan_retries(struct sock *sk, bool alive)
 {
 	int retries = sysctl_tcp_orphan_retries; /* May be zero. */
 
@@ -184,7 +184,7 @@
 
 		retry_until = sysctl_tcp_retries2;
 		if (sock_flag(sk, SOCK_DEAD)) {
-			const int alive = icsk->icsk_rto < TCP_RTO_MAX;
+			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
 
 			retry_until = tcp_orphan_retries(sk, alive);
 			do_reset = alive ||
@@ -298,7 +298,7 @@
 
 	max_probes = sysctl_tcp_retries2;
 	if (sock_flag(sk, SOCK_DEAD)) {
-		const int alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
+		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
 
 		max_probes = tcp_orphan_retries(sk, alive);
 		if (!alive && icsk->icsk_backoff >= max_probes)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index c0a15e7..24ec14f9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -375,7 +375,8 @@
 			return -1;
 		score += 4;
 	}
-
+	if (sk->sk_incoming_cpu == raw_smp_processor_id())
+		score++;
 	return score;
 }
 
@@ -419,6 +420,9 @@
 		score += 4;
 	}
 
+	if (sk->sk_incoming_cpu == raw_smp_processor_id())
+		score++;
+
 	return score;
 }
 
@@ -1017,29 +1021,14 @@
 
 		fl4 = &fl4_stack;
 
-		/* unconnected socket. If output device is enslaved to a VRF
-		 * device lookup source address from VRF table. This mimics
-		 * behavior of ip_route_connect{_init}.
-		 */
-		if (netif_index_is_vrf(net, ipc.oif)) {
-			flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
-					   RT_SCOPE_UNIVERSE, sk->sk_protocol,
-					   (flow_flags | FLOWI_FLAG_VRFSRC),
-					   faddr, saddr, dport,
-					   inet->inet_sport);
-
-			rt = ip_route_output_flow(net, fl4, sk);
-			if (!IS_ERR(rt)) {
-				saddr = fl4->saddr;
-				ip_rt_put(rt);
-			}
-		}
-
 		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
 				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
 				   flow_flags,
 				   faddr, saddr, dport, inet->inet_sport);
 
+		if (!saddr && ipc.oif)
+			l3mdev_get_saddr(net, ipc.oif, fl4);
+
 		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
 		rt = ip_route_output_flow(net, fl4, sk);
 		if (IS_ERR(rt)) {
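
The UDP socket-lookup hunks above add one point to a candidate whose sk_incoming_cpu matches the CPU doing the lookup, so an established flow keeps landing on the CPU that last serviced it. A rough user-space analogy of that scoring bump, with sched_getcpu() standing in for raw_smp_processor_id() (an analogy only, not the kernel API):

/* Illustration of the "prefer the socket last served on this CPU" bump. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

struct fake_sock {
	int incoming_cpu;	/* CPU that last handled RX for this socket */
	int base_score;
};

static int score(const struct fake_sock *sk)
{
	int s = sk->base_score;

	/* Same bump as the hunk above: +1 when the socket's RX history
	 * matches the CPU we are running on, to keep the flow cache-hot.
	 */
	if (sk->incoming_cpu == sched_getcpu())
		s++;
	return s;
}

int main(void)
{
	struct fake_sock a = { .incoming_cpu = sched_getcpu(), .base_score = 4 };
	struct fake_sock b = { .incoming_cpu = -1, .base_score = 4 };

	printf("a=%d b=%d\n", score(&a), score(&b));
	return 0;
}
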
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index cd6be73..9f298d0 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -87,17 +87,15 @@
 #ifdef CONFIG_NETFILTER
 	if (!x) {
 		IPCB(skb)->flags |= IPSKB_REROUTED;
-		return dst_output(sk, skb);
+		return dst_output(net, sk, skb);
 	}
 #endif
 
 	return x->outer_mode->afinfo->output_finish(sk, skb);
 }
 
-int xfrm4_output(struct sock *sk, struct sk_buff *skb)
+int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
-
 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 			    net, sk, skb, NULL, skb_dst(skb)->dev,
 			    __xfrm4_output,
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 6710110..f2606b9 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -15,7 +15,7 @@
 #include <net/dst.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
 
@@ -33,6 +33,8 @@
 	if (saddr)
 		fl4->saddr = saddr->a4;
 
+	fl4->flowi4_flags = FLOWI_FLAG_SKIP_NH_OIF;
+
 	rt = __ip_route_output_key(net, fl4);
 	if (!IS_ERR(rt))
 		return &rt->dst;
@@ -109,10 +111,8 @@
 	struct flowi4 *fl4 = &fl->u.ip4;
 	int oif = 0;
 
-	if (skb_dst(skb)) {
-		oif = vrf_master_ifindex(skb_dst(skb)->dev) ?
-			: skb_dst(skb)->dev->ifindex;
-	}
+	if (skb_dst(skb))
+		oif = l3mdev_fib_oif(skb_dst(skb)->dev);
 
 	memset(fl4, 0, sizeof(struct flowi4));
 	fl4->flowi4_mark = skb->mark;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 75d3dde..c8380f1 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3625,7 +3625,7 @@
 
 	/* send a neighbour solicitation for our addr */
 	addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
-	ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any, NULL);
+	ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any, NULL);
 out:
 	in6_ifa_put(ifp);
 	rtnl_unlock();
@@ -5132,13 +5132,12 @@
 
 			rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
 						       ifp->idev->dev, 0, 0);
-			if (rt && ip6_del_rt(rt))
-				dst_free(&rt->dst);
+			if (rt)
+				ip6_del_rt(rt);
 		}
 		dst_hold(&ifp->rt->dst);
 
-		if (ip6_del_rt(ifp->rt))
-			dst_free(&ifp->rt->dst);
+		ip6_del_rt(ifp->rt);
 
 		rt_genid_bump_ipv6(net);
 		break;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9aadd57..d70b023 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -263,7 +263,7 @@
 
 void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
 {
-	struct ipv6_pinfo *np = inet6_sk(sk);
+	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sock_exterr_skb *serr;
 	struct ipv6hdr *iph;
 	struct sk_buff *skb;
diff --git a/net/ipv6/ila.c b/net/ipv6/ila.c
index 678d2df..1a6852e 100644
--- a/net/ipv6/ila.c
+++ b/net/ipv6/ila.c
@@ -91,7 +91,7 @@
 	*(__be64 *)&ip6h->daddr = p->locator;
 }
 
-static int ila_output(struct sock *sk, struct sk_buff *skb)
+static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 
@@ -100,7 +100,7 @@
 
 	update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
 
-	return dst->lwtstate->orig_output(sk, skb);
+	return dst->lwtstate->orig_output(net, sk, skb);
 
 drop:
 	kfree_skb(skb);
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 6927f3f..5d1c7ce 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -65,17 +65,18 @@
 }
 EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
 
-struct dst_entry *inet6_csk_route_req(struct sock *sk,
+struct dst_entry *inet6_csk_route_req(const struct sock *sk,
 				      struct flowi6 *fl6,
-				      const struct request_sock *req)
+				      const struct request_sock *req,
+				      u8 proto)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
-	struct ipv6_pinfo *np = inet6_sk(sk);
+	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct in6_addr *final_p, final;
 	struct dst_entry *dst;
 
 	memset(fl6, 0, sizeof(*fl6));
-	fl6->flowi6_proto = IPPROTO_TCP;
+	fl6->flowi6_proto = proto;
 	fl6->daddr = ireq->ir_v6_rmt_addr;
 	final_p = fl6_update_dst(fl6, np->opt, &final);
 	fl6->saddr = ireq->ir_v6_loc_addr;
@@ -91,73 +92,7 @@
 
 	return dst;
 }
-
-/*
- * request_sock (formerly open request) hash tables.
- */
-static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
-			   const u32 rnd, const u32 synq_hsize)
-{
-	u32 c;
-
-	c = jhash_3words((__force u32)raddr->s6_addr32[0],
-			 (__force u32)raddr->s6_addr32[1],
-			 (__force u32)raddr->s6_addr32[2],
-			 rnd);
-
-	c = jhash_2words((__force u32)raddr->s6_addr32[3],
-			 (__force u32)rport,
-			 c);
-
-	return c & (synq_hsize - 1);
-}
-
-struct request_sock *inet6_csk_search_req(struct sock *sk,
-					  const __be16 rport,
-					  const struct in6_addr *raddr,
-					  const struct in6_addr *laddr,
-					  const int iif)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	struct request_sock *req;
-	u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd,
-				   lopt->nr_table_entries);
-
-	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
-	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
-		const struct inet_request_sock *ireq = inet_rsk(req);
-
-		if (ireq->ir_rmt_port == rport &&
-		    req->rsk_ops->family == AF_INET6 &&
-		    ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
-		    ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
-		    (!ireq->ir_iif || ireq->ir_iif == iif)) {
-			atomic_inc(&req->rsk_refcnt);
-			WARN_ON(req->sk != NULL);
-			break;
-		}
-	}
-	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
-
-	return req;
-}
-EXPORT_SYMBOL_GPL(inet6_csk_search_req);
-
-void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
-				    struct request_sock *req,
-				    const unsigned long timeout)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	const u32 h = inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr,
-				      inet_rsk(req)->ir_rmt_port,
-				      lopt->hash_rnd, lopt->nr_table_entries);
-
-	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
-	inet_csk_reqsk_queue_added(sk, timeout);
-}
-EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);
+EXPORT_SYMBOL(inet6_csk_route_req);
 
 void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
 {
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 6ac8dad..21ace5a 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -114,6 +114,8 @@
 				return -1;
 			score++;
 		}
+		if (sk->sk_incoming_cpu == raw_smp_processor_id())
+			score++;
 	}
 	return score;
 }
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 418d982..7d2e002 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -155,6 +155,11 @@
 	kmem_cache_free(fib6_node_kmem, fn);
 }
 
+static void rt6_rcu_free(struct rt6_info *rt)
+{
+	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
+}
+
 static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 {
 	int cpu;
@@ -169,7 +174,7 @@
 		ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
 		pcpu_rt = *ppcpu_rt;
 		if (pcpu_rt) {
-			dst_free(&pcpu_rt->dst);
+			rt6_rcu_free(pcpu_rt);
 			*ppcpu_rt = NULL;
 		}
 	}
@@ -181,7 +186,7 @@
 {
 	if (atomic_dec_and_test(&rt->rt6i_ref)) {
 		rt6_free_pcpu(rt);
-		dst_free(&rt->dst);
+		rt6_rcu_free(rt);
 	}
 }
 
@@ -846,7 +851,7 @@
 		*ins = rt;
 		rt->rt6i_node = fn;
 		atomic_inc(&rt->rt6i_ref);
-		inet6_rt_notify(RTM_NEWROUTE, rt, info);
+		inet6_rt_notify(RTM_NEWROUTE, rt, info, 0);
 		info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
 
 		if (!(fn->fn_flags & RTN_RTINFO)) {
@@ -872,7 +877,7 @@
 		rt->rt6i_node = fn;
 		rt->dst.rt6_next = iter->dst.rt6_next;
 		atomic_inc(&rt->rt6i_ref);
-		inet6_rt_notify(RTM_NEWROUTE, rt, info);
+		inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
 		if (!(fn->fn_flags & RTN_RTINFO)) {
 			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
 			fn->fn_flags |= RTN_RTINFO;
@@ -933,6 +938,10 @@
 	int replace_required = 0;
 	int sernum = fib6_new_sernum(info->nl_net);
 
+	if (WARN_ON_ONCE((rt->dst.flags & DST_NOCACHE) &&
+			 !atomic_read(&rt->dst.__refcnt)))
+		return -EINVAL;
+
 	if (info->nlh) {
 		if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
 			allow_create = 0;
@@ -1025,6 +1034,7 @@
 		fib6_start_gc(info->nl_net, rt);
 		if (!(rt->rt6i_flags & RTF_CACHE))
 			fib6_prune_clones(info->nl_net, pn);
+		rt->dst.flags &= ~DST_NOCACHE;
 	}
 
 out:
@@ -1049,7 +1059,8 @@
 			atomic_inc(&pn->leaf->rt6i_ref);
 		}
 #endif
-		dst_free(&rt->dst);
+		if (!(rt->dst.flags & DST_NOCACHE))
+			dst_free(&rt->dst);
 	}
 	return err;
 
@@ -1060,7 +1071,8 @@
 st_failure:
 	if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
 		fib6_repair_tree(info->nl_net, fn);
-	dst_free(&rt->dst);
+	if (!(rt->dst.flags & DST_NOCACHE))
+		dst_free(&rt->dst);
 	return err;
 #endif
 }
@@ -1410,7 +1422,7 @@
 
 	fib6_purge_rt(rt, fn, net);
 
-	inet6_rt_notify(RTM_DELROUTE, rt, info);
+	inet6_rt_notify(RTM_DELROUTE, rt, info, 0);
 	rt6_release(rt);
 }
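
The ip6_fib.c change above retires routes with call_rcu() instead of dst_free() so lockless readers walking the tree never dereference freed memory. A user-space sketch of the same publish-then-retire idea, assuming the liburcu library (memb flavour, link with -lurcu) and using a blocking synchronize_rcu() where the kernel defers with call_rcu():

/* RCU publish/retire sketch; assumes liburcu, not the kernel API. */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct route {
	int metric;
};

static struct route *current_route;

static void reader(void)
{
	struct route *r;

	rcu_read_lock();
	r = rcu_dereference(current_route);
	if (r)
		printf("metric=%d\n", r->metric);
	rcu_read_unlock();
}

static void replace_route(int metric)
{
	struct route *new = malloc(sizeof(*new));
	struct route *old;

	new->metric = metric;
	old = current_route;
	rcu_assign_pointer(current_route, new);
	/* Wait for every pre-existing reader before freeing the old copy;
	 * call_rcu() would defer this instead of blocking.
	 */
	synchronize_rcu();
	free(old);
}

int main(void)
{
	rcu_register_thread();
	replace_route(10);
	reader();
	replace_route(20);
	reader();
	rcu_unregister_thread();
	return 0;
}
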
 
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4038c69..3c7b931 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -404,13 +404,13 @@
 		struct ipv6_tlv_tnl_enc_lim *tel;
 		__u32 mtu;
 	case ICMPV6_DEST_UNREACH:
-		net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
-				     t->parms.name);
+		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
+				    t->parms.name);
 		break;
 	case ICMPV6_TIME_EXCEED:
 		if (code == ICMPV6_EXC_HOPLIMIT) {
-			net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
-					     t->parms.name);
+			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
+					    t->parms.name);
 		}
 		break;
 	case ICMPV6_PARAMPROB:
@@ -421,12 +421,12 @@
 		if (teli && teli == be32_to_cpu(info) - 2) {
 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
 			if (tel->encap_limit == 0) {
-				net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
-						     t->parms.name);
+				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
+						    t->parms.name);
 			}
 		} else {
-			net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
-					     t->parms.name);
+			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
+					    t->parms.name);
 		}
 		break;
 	case ICMPV6_PKT_TOOBIG:
@@ -634,20 +634,20 @@
 	}
 
 	if (!fl6->flowi6_mark)
-		dst = ip6_tnl_dst_check(tunnel);
+		dst = ip6_tnl_dst_get(tunnel);
 
 	if (!dst) {
-		ndst = ip6_route_output(net, NULL, fl6);
+		dst = ip6_route_output(net, NULL, fl6);
 
-		if (ndst->error)
+		if (dst->error)
 			goto tx_err_link_failure;
-		ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
-		if (IS_ERR(ndst)) {
-			err = PTR_ERR(ndst);
-			ndst = NULL;
+		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
+		if (IS_ERR(dst)) {
+			err = PTR_ERR(dst);
+			dst = NULL;
 			goto tx_err_link_failure;
 		}
-		dst = ndst;
+		ndst = dst;
 	}
 
 	tdev = dst->dev;
@@ -702,12 +702,9 @@
 		skb = new_skb;
 	}
 
-	if (fl6->flowi6_mark) {
-		skb_dst_set(skb, dst);
-		ndst = NULL;
-	} else {
-		skb_dst_set_noref(skb, dst);
-	}
+	if (!fl6->flowi6_mark && ndst)
+		ip6_tnl_dst_set(tunnel, ndst);
+	skb_dst_set(skb, dst);
 
 	proto = NEXTHDR_GRE;
 	if (encap_limit >= 0) {
@@ -762,14 +759,12 @@
 	skb_set_inner_protocol(skb, protocol);
 
 	ip6tunnel_xmit(NULL, skb, dev);
-	if (ndst)
-		ip6_tnl_dst_store(tunnel, ndst);
 	return 0;
 tx_err_link_failure:
 	stats->tx_carrier_errors++;
 	dst_link_failure(skb);
 tx_err_dst_release:
-	dst_release(ndst);
+	dst_release(dst);
 	return err;
 }
 
@@ -1223,6 +1218,9 @@
 
 static void ip6gre_dev_free(struct net_device *dev)
 {
+	struct ip6_tnl *t = netdev_priv(dev);
+
+	ip6_tnl_dst_destroy(t);
 	free_percpu(dev->tstats);
 	free_netdev(dev);
 }
@@ -1245,9 +1243,10 @@
 	netif_keep_dst(dev);
 }
 
-static int ip6gre_tunnel_init(struct net_device *dev)
+static int ip6gre_tunnel_init_common(struct net_device *dev)
 {
 	struct ip6_tnl *tunnel;
+	int ret;
 
 	tunnel = netdev_priv(dev);
 
@@ -1255,16 +1254,37 @@
 	tunnel->net = dev_net(dev);
 	strcpy(tunnel->parms.name, dev->name);
 
+	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+
+	ret = ip6_tnl_dst_init(tunnel);
+	if (ret) {
+		free_percpu(dev->tstats);
+		dev->tstats = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ip6gre_tunnel_init(struct net_device *dev)
+{
+	struct ip6_tnl *tunnel;
+	int ret;
+
+	ret = ip6gre_tunnel_init_common(dev);
+	if (ret)
+		return ret;
+
+	tunnel = netdev_priv(dev);
+
 	memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
 	memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
 
 	if (ipv6_addr_any(&tunnel->parms.raddr))
 		dev->header_ops = &ip6gre_header_ops;
 
-	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-	if (!dev->tstats)
-		return -ENOMEM;
-
 	return 0;
 }
 
@@ -1460,19 +1480,16 @@
 static int ip6gre_tap_init(struct net_device *dev)
 {
 	struct ip6_tnl *tunnel;
+	int ret;
+
+	ret = ip6gre_tunnel_init_common(dev);
+	if (ret)
+		return ret;
 
 	tunnel = netdev_priv(dev);
 
-	tunnel->dev = dev;
-	tunnel->net = dev_net(dev);
-	strcpy(tunnel->parms.name, dev->name);
-
 	ip6gre_tnl_link_config(tunnel, 1);
 
-	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-	if (!dev->tstats)
-		return -ENOMEM;
-
 	return 0;
 }
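
ip6gre_tunnel_init_common() above allocates the per-cpu stats first and the dst cache second, and frees the stats again if the second step fails. The generic shape of that acquire-in-order / unwind-in-reverse pattern, as a stand-alone sketch with made-up resource names:

/* Two-step init with unwind on failure (names and sizes are made up). */
#include <stdlib.h>

struct tnl_priv {
	void *stats;
	void *dst_cache;
};

static int tnl_init(struct tnl_priv *p)
{
	p->stats = calloc(1, 64);
	if (!p->stats)
		return -1;

	p->dst_cache = calloc(1, 128);
	if (!p->dst_cache) {
		/* Unwind in reverse order so a failed init leaves the
		 * structure exactly as it was found.
		 */
		free(p->stats);
		p->stats = NULL;
		return -1;
	}
	return 0;
}

static void tnl_destroy(struct tnl_priv *p)
{
	free(p->dst_cache);
	free(p->stats);
}

int main(void)
{
	struct tnl_priv p = { 0 };

	if (tnl_init(&p) == 0)
		tnl_destroy(&p);
	return 0;
}
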
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 291a07b..32583b5 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -56,11 +56,10 @@
 #include <net/checksum.h>
 #include <linux/mroute6.h>
 
-static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
+static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *dev = dst->dev;
-	struct net *net = dev_net(dev);
 	struct neighbour *neigh;
 	struct in6_addr *nexthop;
 	int ret;
@@ -126,16 +125,15 @@
 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 	    dst_allfrag(skb_dst(skb)) ||
 	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
-		return ip6_fragment(sk, skb, ip6_finish_output2);
+		return ip6_fragment(net, sk, skb, ip6_finish_output2);
 	else
-		return ip6_finish_output2(sk, skb);
+		return ip6_finish_output2(net, sk, skb);
 }
 
-int ip6_output(struct sock *sk, struct sk_buff *skb)
+int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct net_device *dev = skb_dst(skb)->dev;
 	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
-	struct net *net = dev_net(dev);
 
 	if (unlikely(idev->cnf.disable_ipv6)) {
 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
@@ -150,14 +148,16 @@
 }
 
 /*
- *	xmit an sk_buff (used by TCP, SCTP and DCCP)
+ * xmit an sk_buff (used by TCP, SCTP and DCCP)
+ * Note : socket lock is not held for SYNACK packets, but might be modified
+ * by calls to skb_set_owner_w() and ipv6_local_error(),
+ * which are using proper atomic operations or spinlocks.
  */
-
-int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	     struct ipv6_txoptions *opt, int tclass)
 {
 	struct net *net = sock_net(sk);
-	struct ipv6_pinfo *np = inet6_sk(sk);
+	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct in6_addr *first_hop = &fl6->daddr;
 	struct dst_entry *dst = skb_dst(skb);
 	struct ipv6hdr *hdr;
@@ -186,7 +186,10 @@
 			}
 			consume_skb(skb);
 			skb = skb2;
-			skb_set_owner_w(skb, sk);
+			/* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
+			 * so it is safe to call it in our context (socket lock not held).
+			 */
+			skb_set_owner_w(skb, (struct sock *)sk);
 		}
 		if (opt->opt_flen)
 			ipv6_push_frag_opts(skb, opt, &proto);
@@ -224,13 +227,20 @@
 	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
 		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
 			      IPSTATS_MIB_OUT, skb->len);
+		/* Hooks should never assume the socket lock is held;
+		 * we promote our socket to non-const.
+		 */
 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
-			       net, sk, skb, NULL, dst->dev,
-			       dst_output_okfn);
+			       net, (struct sock *)sk, skb, NULL, dst->dev,
+			       dst_output);
 	}
 
 	skb->dev = dst->dev;
-	ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
+	/* ipv6_local_error() does not require the socket lock,
+	 * so we promote our socket to non-const.
+	 */
+	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);
+
 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
 	kfree_skb(skb);
 	return -EMSGSIZE;
@@ -322,7 +332,7 @@
 				     struct sk_buff *skb)
 {
 	skb_sender_cpu_clear(skb);
-	return dst_output(sk, skb);
+	return dst_output(net, sk, skb);
 }
 
 static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
@@ -542,8 +552,8 @@
 	skb_copy_secmark(to, from);
 }
 
-int ip6_fragment(struct sock *sk, struct sk_buff *skb,
-		 int (*output)(struct sock *, struct sk_buff *))
+int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+		 int (*output)(struct net *, struct sock *, struct sk_buff *))
 {
 	struct sk_buff *frag;
 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
@@ -556,7 +566,6 @@
 	__be32 frag_id;
 	int ptr, offset = 0, err = 0;
 	u8 *prevhdr, nexthdr = 0;
-	struct net *net = dev_net(skb_dst(skb)->dev);
 
 	hlen = ip6_find_1stfragopt(skb, &prevhdr);
 	nexthdr = *prevhdr;
@@ -588,20 +597,22 @@
 	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
 				    &ipv6_hdr(skb)->saddr);
 
+	hroom = LL_RESERVED_SPACE(rt->dst.dev);
 	if (skb_has_frag_list(skb)) {
 		int first_len = skb_pagelen(skb);
 		struct sk_buff *frag2;
 
 		if (first_len - hlen > mtu ||
 		    ((first_len - hlen) & 7) ||
-		    skb_cloned(skb))
+		    skb_cloned(skb) ||
+		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
 			goto slow_path;
 
 		skb_walk_frags(skb, frag) {
 			/* Correct geometry. */
 			if (frag->len > mtu ||
 			    ((frag->len & 7) && frag->next) ||
-			    skb_headroom(frag) < hlen)
+			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
 				goto slow_path_clean;
 
 			/* Partially cloned skb? */
@@ -618,8 +629,6 @@
 
 		err = 0;
 		offset = 0;
-		frag = skb_shinfo(skb)->frag_list;
-		skb_frag_list_init(skb);
 		/* BUILD HEADER */
 
 		*prevhdr = NEXTHDR_FRAGMENT;
@@ -627,8 +636,11 @@
 		if (!tmp_hdr) {
 			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 				      IPSTATS_MIB_FRAGFAILS);
-			return -ENOMEM;
+			err = -ENOMEM;
+			goto fail;
 		}
+		frag = skb_shinfo(skb)->frag_list;
+		skb_frag_list_init(skb);
 
 		__skb_pull(skb, hlen);
 		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
@@ -673,7 +685,7 @@
 				ip6_copy_metadata(frag, skb);
 			}
 
-			err = output(sk, skb);
+			err = output(net, sk, skb);
 			if (!err)
 				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 					      IPSTATS_MIB_FRAGCREATES);
@@ -725,7 +737,6 @@
 	 */
 
 	*prevhdr = NEXTHDR_FRAGMENT;
-	hroom = LL_RESERVED_SPACE(rt->dst.dev);
 	troom = rt->dst.dev->needed_tailroom;
 
 	/*
@@ -802,7 +813,7 @@
 		/*
 		 *	Put this fragment into the sending queue.
 		 */
-		err = output(sk, frag);
+		err = output(net, sk, frag);
 		if (err)
 			goto fail;
 
@@ -883,7 +894,7 @@
 	return dst;
 }
 
-static int ip6_dst_lookup_tail(struct net *net, struct sock *sk,
+static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
 			       struct dst_entry **dst, struct flowi6 *fl6)
 {
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -1014,7 +1025,7 @@
  *	It returns a valid dst pointer on success, or a pointer encoded
  *	error code.
  */
-struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
 				      const struct in6_addr *final_dst)
 {
 	struct dst_entry *dst = NULL;
@@ -1680,7 +1691,7 @@
 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
 	int err;
 
-	err = ip6_local_out(skb);
+	err = ip6_local_out(net, skb->sk, skb);
 	if (err) {
 		if (err > 0)
 			err = net_xmit_errno(err);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index b0ab420..eabffbb 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -126,36 +126,92 @@
  * Locking : hash tables are protected by RCU and RTNL
  */
 
-struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
+static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
+				    struct dst_entry *dst)
 {
-	struct dst_entry *dst = t->dst_cache;
-
-	if (dst && dst->obsolete &&
-	    !dst->ops->check(dst, t->dst_cookie)) {
-		t->dst_cache = NULL;
-		dst_release(dst);
-		return NULL;
+	write_seqlock_bh(&idst->lock);
+	dst_release(rcu_dereference_protected(
+			    idst->dst,
+			    lockdep_is_held(&idst->lock.lock)));
+	if (dst) {
+		dst_hold(dst);
+		idst->cookie = rt6_get_cookie((struct rt6_info *)dst);
+	} else {
+		idst->cookie = 0;
 	}
+	rcu_assign_pointer(idst->dst, dst);
+	write_sequnlock_bh(&idst->lock);
+}
 
+struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
+{
+	struct ip6_tnl_dst *idst;
+	struct dst_entry *dst;
+	unsigned int seq;
+	u32 cookie;
+
+	idst = raw_cpu_ptr(t->dst_cache);
+
+	rcu_read_lock();
+	do {
+		seq = read_seqbegin(&idst->lock);
+		dst = rcu_dereference(idst->dst);
+		cookie = idst->cookie;
+	} while (read_seqretry(&idst->lock, seq));
+
+	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+		dst = NULL;
+	rcu_read_unlock();
+
+	if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) {
+		ip6_tnl_per_cpu_dst_set(idst, NULL);
+		dst_release(dst);
+		dst = NULL;
+	}
 	return dst;
 }
-EXPORT_SYMBOL_GPL(ip6_tnl_dst_check);
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_get);
 
 void ip6_tnl_dst_reset(struct ip6_tnl *t)
 {
-	dst_release(t->dst_cache);
-	t->dst_cache = NULL;
+	int i;
+
+	for_each_possible_cpu(i)
+		ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL);
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
 
-void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
+void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst)
 {
-	struct rt6_info *rt = (struct rt6_info *) dst;
-	t->dst_cookie = rt6_get_cookie(rt);
-	dst_release(t->dst_cache);
-	t->dst_cache = dst;
+	ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), dst);
 }
-EXPORT_SYMBOL_GPL(ip6_tnl_dst_store);
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_set);
+
+void ip6_tnl_dst_destroy(struct ip6_tnl *t)
+{
+	if (!t->dst_cache)
+		return;
+
+	ip6_tnl_dst_reset(t);
+	free_percpu(t->dst_cache);
+}
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_destroy);
+
+int ip6_tnl_dst_init(struct ip6_tnl *t)
+{
+	int i;
+
+	t->dst_cache = alloc_percpu(struct ip6_tnl_dst);
+	if (!t->dst_cache)
+		return -ENOMEM;
+
+	for_each_possible_cpu(i)
+		seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_init);
 
 /**
  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
@@ -271,6 +327,9 @@
 
 static void ip6_dev_free(struct net_device *dev)
 {
+	struct ip6_tnl *t = netdev_priv(dev);
+
+	ip6_tnl_dst_destroy(t);
 	free_percpu(dev->tstats);
 	free_netdev(dev);
 }
@@ -510,14 +569,14 @@
 		struct ipv6_tlv_tnl_enc_lim *tel;
 		__u32 mtu;
 	case ICMPV6_DEST_UNREACH:
-		net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
-				     t->parms.name);
+		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
+				    t->parms.name);
 		rel_msg = 1;
 		break;
 	case ICMPV6_TIME_EXCEED:
 		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
-			net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
-					     t->parms.name);
+			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
+					    t->parms.name);
 			rel_msg = 1;
 		}
 		break;
@@ -529,13 +588,13 @@
 		if (teli && teli == *info - 2) {
 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
 			if (tel->encap_limit == 0) {
-				net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
-						     t->parms.name);
+				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
+						    t->parms.name);
 				rel_msg = 1;
 			}
 		} else {
-			net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
-					     t->parms.name);
+			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
+					    t->parms.name);
 		}
 		break;
 	case ICMPV6_PKT_TOOBIG:
@@ -1010,23 +1069,23 @@
 		memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
 		neigh_release(neigh);
 	} else if (!fl6->flowi6_mark)
-		dst = ip6_tnl_dst_check(t);
+		dst = ip6_tnl_dst_get(t);
 
 	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
 		goto tx_err_link_failure;
 
 	if (!dst) {
-		ndst = ip6_route_output(net, NULL, fl6);
+		dst = ip6_route_output(net, NULL, fl6);
 
-		if (ndst->error)
+		if (dst->error)
 			goto tx_err_link_failure;
-		ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
-		if (IS_ERR(ndst)) {
-			err = PTR_ERR(ndst);
-			ndst = NULL;
+		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
+		if (IS_ERR(dst)) {
+			err = PTR_ERR(dst);
+			dst = NULL;
 			goto tx_err_link_failure;
 		}
-		dst = ndst;
+		ndst = dst;
 	}
 
 	tdev = dst->dev;
@@ -1072,12 +1131,11 @@
 		consume_skb(skb);
 		skb = new_skb;
 	}
-	if (fl6->flowi6_mark) {
-		skb_dst_set(skb, dst);
-		ndst = NULL;
-	} else {
-		skb_dst_set_noref(skb, dst);
-	}
+
+	if (!fl6->flowi6_mark && ndst)
+		ip6_tnl_dst_set(t, ndst);
+	skb_dst_set(skb, dst);
+
 	skb->transport_header = skb->network_header;
 
 	proto = fl6->flowi6_proto;
@@ -1101,14 +1159,12 @@
 	ipv6h->saddr = fl6->saddr;
 	ipv6h->daddr = fl6->daddr;
 	ip6tunnel_xmit(NULL, skb, dev);
-	if (ndst)
-		ip6_tnl_dst_store(t, ndst);
 	return 0;
 tx_err_link_failure:
 	stats->tx_carrier_errors++;
 	dst_link_failure(skb);
 tx_err_dst_release:
-	dst_release(ndst);
+	dst_release(dst);
 	return err;
 }
 
@@ -1573,12 +1629,21 @@
 ip6_tnl_dev_init_gen(struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
+	int ret;
 
 	t->dev = dev;
 	t->net = dev_net(dev);
 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 	if (!dev->tstats)
 		return -ENOMEM;
+
+	ret = ip6_tnl_dst_init(t);
+	if (ret) {
+		free_percpu(dev->tstats);
+		dev->tstats = NULL;
+		return ret;
+	}
+
 	return 0;
 }
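
ip6_tnl_dst_get() above pairs each per-cpu cached dst with a cookie and lets readers retry under a seqlock when a writer races with them. A minimal single-writer sketch of that read-retry shape in plain C11 atomics (the memory ordering is deliberately simplified; this is not the kernel seqlock API):

/* Single-writer seqcount sketch of the read-retry loop used above. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct cached_pair {
	atomic_uint seq;	/* odd while a write is in progress */
	uint64_t dst;		/* stand-ins for (dst pointer, cookie) */
	uint32_t cookie;
};

static void pair_write(struct cached_pair *p, uint64_t dst, uint32_t cookie)
{
	atomic_fetch_add_explicit(&p->seq, 1, memory_order_release); /* -> odd */
	p->dst = dst;
	p->cookie = cookie;
	atomic_fetch_add_explicit(&p->seq, 1, memory_order_release); /* -> even */
}

static void pair_read(struct cached_pair *p, uint64_t *dst, uint32_t *cookie)
{
	unsigned int seq;

	do {
		/* Retry until the same even sequence number is observed
		 * before and after copying both fields.
		 */
		seq = atomic_load_explicit(&p->seq, memory_order_acquire);
		*dst = p->dst;
		*cookie = p->cookie;
	} while ((seq & 1) ||
		 seq != atomic_load_explicit(&p->seq, memory_order_acquire));
}

int main(void)
{
	struct cached_pair p;
	uint64_t dst;
	uint32_t cookie;

	atomic_init(&p.seq, 0);
	pair_write(&p, 0xdeadbeef, 42);
	pair_read(&p, &dst, &cookie);
	printf("dst=%llx cookie=%u\n", (unsigned long long)dst, cookie);
	return 0;
}
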
 
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index f96f1c1..0a8610b 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -482,7 +482,7 @@
 		return -EMSGSIZE;
 	}
 
-	err = dst_output(skb->sk, skb);
+	err = dst_output(t->net, skb->sk, skb);
 	if (net_xmit_eval(err) == 0) {
 		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 5e5d16e..ad19136 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1991,7 +1991,7 @@
 			 IPSTATS_MIB_OUTFORWDATAGRAMS);
 	IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
 			 IPSTATS_MIB_OUTOCTETS, skb->len);
-	return dst_output(sk, skb);
+	return dst_output(net, sk, skb);
 }
 
 /*
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index a8bf57c..124338a 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1646,7 +1646,7 @@
 
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 		      net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
-		      dst_output_okfn);
+		      dst_output);
 out:
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
@@ -2010,7 +2010,7 @@
 	skb_dst_set(skb, dst);
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 		      net, sk, skb, NULL, skb->dev,
-		      dst_output_okfn);
+		      dst_output);
 out:
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, type);
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index b9779d4..60c79a0 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -118,7 +118,7 @@
 
 struct mip6_report_rate_limiter {
 	spinlock_t lock;
-	struct timeval stamp;
+	ktime_t stamp;
 	int iif;
 	struct in6_addr src;
 	struct in6_addr dst;
@@ -184,20 +184,18 @@
 	return 0;
 }
 
-static inline int mip6_report_rl_allow(struct timeval *stamp,
+static inline int mip6_report_rl_allow(ktime_t stamp,
 				       const struct in6_addr *dst,
 				       const struct in6_addr *src, int iif)
 {
 	int allow = 0;
 
 	spin_lock_bh(&mip6_report_rl.lock);
-	if (mip6_report_rl.stamp.tv_sec != stamp->tv_sec ||
-	    mip6_report_rl.stamp.tv_usec != stamp->tv_usec ||
+	if (!ktime_equal(mip6_report_rl.stamp, stamp) ||
 	    mip6_report_rl.iif != iif ||
 	    !ipv6_addr_equal(&mip6_report_rl.src, src) ||
 	    !ipv6_addr_equal(&mip6_report_rl.dst, dst)) {
-		mip6_report_rl.stamp.tv_sec = stamp->tv_sec;
-		mip6_report_rl.stamp.tv_usec = stamp->tv_usec;
+		mip6_report_rl.stamp = stamp;
 		mip6_report_rl.iif = iif;
 		mip6_report_rl.src = *src;
 		mip6_report_rl.dst = *dst;
@@ -216,7 +214,7 @@
 	struct ipv6_destopt_hao *hao = NULL;
 	struct xfrm_selector sel;
 	int offset;
-	struct timeval stamp;
+	ktime_t stamp;
 	int err = 0;
 
 	if (unlikely(fl6->flowi6_proto == IPPROTO_MH &&
@@ -230,9 +228,9 @@
 					(skb_network_header(skb) + offset);
 	}
 
-	skb_get_timestamp(skb, &stamp);
+	stamp = skb_get_ktime(skb);
 
-	if (!mip6_report_rl_allow(&stamp, &ipv6_hdr(skb)->daddr,
+	if (!mip6_report_rl_allow(stamp, &ipv6_hdr(skb)->daddr,
 				  hao ? &hao->addr : &ipv6_hdr(skb)->saddr,
 				  opt->iif))
 		goto out;
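
mip6_report_rl_allow() above now keys its one-slot rate limiter on a ktime_t stamp instead of a struct timeval. A small user-space sketch of the same shape, with clock_gettime() standing in for skb_get_ktime() (an analogy, not the kernel API; only the timestamp and interface index are compared here):

/* One-slot rate limiter keyed on a nanosecond timestamp. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t last_stamp_ns;
static int last_iif;

static bool report_allow(int64_t stamp_ns, int iif)
{
	/* Allow one report per distinct (timestamp, interface) tuple;
	 * a repeat of the last tuple is rate limited.
	 */
	if (stamp_ns != last_stamp_ns || iif != last_iif) {
		last_stamp_ns = stamp_ns;
		last_iif = iif;
		return true;
	}
	return false;
}

int main(void)
{
	struct timespec ts;
	int64_t now;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	now = (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
	printf("%d %d\n", report_allow(now, 2), report_allow(now, 2)); /* 1 0 */
	return 0;
}
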
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index dde5a1e..b18012f 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -465,7 +465,7 @@
 
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 		      net, sk, skb, NULL, dst->dev,
-		      dst_output_okfn);
+		      dst_output);
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, type);
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
@@ -474,8 +474,7 @@
 	rcu_read_unlock();
 }
 
-void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
-		   const struct in6_addr *daddr,
+void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
 		   const struct in6_addr *solicited_addr,
 		   bool router, bool solicited, bool override, bool inc_opt)
 {
@@ -541,7 +540,7 @@
 
 	read_lock_bh(&idev->lock);
 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
-		ndisc_send_na(dev, NULL, &in6addr_linklocal_allnodes, &ifa->addr,
+		ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifa->addr,
 			      /*router=*/ !!idev->cnf.forwarding,
 			      /*solicited=*/ false, /*override=*/ true,
 			      /*inc_opt=*/ true);
@@ -551,8 +550,7 @@
 	in6_dev_put(idev);
 }
 
-void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
-		   const struct in6_addr *solicit,
+void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
 		   const struct in6_addr *daddr, const struct in6_addr *saddr,
 		   struct sk_buff *oskb)
 {
@@ -679,12 +677,12 @@
 				  "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
 				  __func__, target);
 		}
-		ndisc_send_ns(dev, neigh, target, target, saddr, skb);
+		ndisc_send_ns(dev, target, target, saddr, skb);
 	} else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
 		neigh_app_ns(neigh);
 	} else {
 		addrconf_addr_solict_mult(target, &mcaddr);
-		ndisc_send_ns(dev, NULL, target, &mcaddr, saddr, skb);
+		ndisc_send_ns(dev, target, &mcaddr, saddr, skb);
 	}
 }
 
@@ -828,7 +826,7 @@
 		is_router = idev->cnf.forwarding;
 
 	if (dad) {
-		ndisc_send_na(dev, NULL, &in6addr_linklocal_allnodes, &msg->target,
+		ndisc_send_na(dev, &in6addr_linklocal_allnodes, &msg->target,
 			      !!is_router, false, (ifp != NULL), true);
 		goto out;
 	}
@@ -849,8 +847,7 @@
 			     NEIGH_UPDATE_F_WEAK_OVERRIDE|
 			     NEIGH_UPDATE_F_OVERRIDE);
 	if (neigh || !dev->header_ops) {
-		ndisc_send_na(dev, neigh, saddr, &msg->target,
-			      !!is_router,
+		ndisc_send_na(dev, saddr, &msg->target, !!is_router,
 			      true, (ifp != NULL && inc), inc);
 		if (neigh)
 			neigh_release(neigh);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index b4de08a..d11c468 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -18,9 +18,8 @@
 #include <net/ip6_checksum.h>
 #include <net/netfilter/nf_queue.h>
 
-int ip6_route_me_harder(struct sk_buff *skb)
+int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	unsigned int hh_len;
 	struct dst_entry *dst;
@@ -93,7 +92,7 @@
 	}
 }
 
-static int nf_ip6_reroute(struct sk_buff *skb,
+static int nf_ip6_reroute(struct net *net, struct sk_buff *skb,
 			  const struct nf_queue_entry *entry)
 {
 	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
@@ -103,7 +102,7 @@
 		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
 		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
 		    skb->mark != rt_info->mark)
-			return ip6_route_me_harder(skb);
+			return ip6_route_me_harder(net, skb);
 	}
 	return 0;
 }
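
Note: ip6_route_me_harder() (and the nf_ip6_reroute() wrapper) now take the network namespace from the caller instead of deriving it from skb_dst(skb)->dev; in hook context that is state->net, as the later ip6table_mangle and NAT hunks show. A minimal sketch of the new usage from an output hook; example_mangle_out is an illustrative name, not part of the patch:

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>

static unsigned int example_mangle_out(void *priv, struct sk_buff *skb,
				       const struct nf_hook_state *state)
{
	int err;

	/* ...assume the packet's addresses or mark were rewritten here... */
	err = ip6_route_me_harder(state->net, skb);
	if (err < 0)
		return NF_DROP;
	return NF_ACCEPT;
}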
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index cd9b401..80e3bd7 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -314,10 +314,10 @@
 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
 unsigned int
 ip6t_do_table(struct sk_buff *skb,
-	      unsigned int hook,
 	      const struct nf_hook_state *state,
 	      struct xt_table *table)
 {
+	unsigned int hook = state->hook;
 	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
 	/* Initializing verdict to NF_DROP keeps gcc happy. */
 	unsigned int verdict = NF_DROP;
@@ -340,6 +340,7 @@
 	 * rule is also a fragment-specific rule, non-fragments won't
 	 * match it. */
 	acpar.hotdrop = false;
+	acpar.net     = state->net;
 	acpar.in      = state->in;
 	acpar.out     = state->out;
 	acpar.family  = NFPROTO_IPV6;
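
Note: ip6t_do_table() loses its explicit hook argument (the hook number is read from state->hook), and struct xt_action_param gains a net field filled from state->net, so extensions no longer have to guess the namespace from par->in/par->out. A hypothetical target body just to show the new field; example_tg6 is not part of the patch:

#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/skbuff.h>

static unsigned int example_tg6(struct sk_buff *skb,
				const struct xt_action_param *par)
{
	/* was: dev_net(par->in != NULL ? par->in : par->out) */
	pr_debug("target running in netns %p\n", par->net);

	return XT_CONTINUE;
}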
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 0ed841a..db29bbf 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -39,7 +39,7 @@
 reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct ip6t_reject_info *reject = par->targinfo;
-	struct net *net = dev_net((par->in != NULL) ? par->in : par->out);
+	struct net *net = par->net;
 
 	switch (reject->with) {
 	case IP6T_ICMP6_NO_ROUTE:
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 4c9f3e7..a10a2a9 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -76,7 +76,7 @@
 		nf_conntrack_get(nfct);
 	}
 
-	ip6_local_out(nskb);
+	ip6_local_out(net, nskb->sk, nskb);
 	return;
 
 free_nskb:
@@ -275,7 +275,7 @@
 synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_synproxy_info *info = par->targinfo;
-	struct synproxy_net *snet = synproxy_pernet(dev_net(par->in));
+	struct synproxy_net *snet = synproxy_pernet(par->net);
 	struct synproxy_options opts = {};
 	struct tcphdr *th, _th;
 
@@ -316,7 +316,7 @@
 	return XT_CONTINUE;
 }
 
-static unsigned int ipv6_synproxy_hook(const struct nf_hook_ops *ops,
+static unsigned int ipv6_synproxy_hook(void *priv,
 				       struct sk_buff *skb,
 				       const struct nf_hook_state *nhs)
 {
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index 790e0c6..1ee1b25 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -26,7 +26,7 @@
 	return addr_type & IPV6_ADDR_UNICAST;
 }
 
-static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,
+static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
 				     const struct net_device *dev, u8 flags)
 {
 	struct rt6_info *rt;
@@ -53,7 +53,7 @@
 		lookup_flags |= RT6_LOOKUP_F_IFACE;
 	}
 
-	rt = (void *) ip6_route_lookup(dev_net(dev), &fl6, lookup_flags);
+	rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags);
 	if (rt->dst.error)
 		goto out;
 
@@ -93,7 +93,7 @@
 	if (unlikely(saddrtype == IPV6_ADDR_ANY))
 		return true ^ invert; /* not routable: forward path will drop it */
 
-	return rpfilter_lookup_reverse6(skb, par->in, info->flags) ^ invert;
+	return rpfilter_lookup_reverse6(par->net, skb, par->in, info->flags) ^ invert;
 }
 
 static int rpfilter_check(const struct xt_mtchk_param *par)
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 2449005..8b277b9 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -32,11 +32,10 @@
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-ip6table_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip6table_filter_hook(void *priv, struct sk_buff *skb,
 		     const struct nf_hook_state *state)
 {
-	return ip6t_do_table(skb, ops->hooknum, state,
-			     state->net->ipv6.ip6table_filter);
+	return ip6t_do_table(skb, state, state->net->ipv6.ip6table_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
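
Note: this and the following table/conntrack files are converted to the new netfilter hook prototype: the hook function receives the opaque priv pointer associated with the hook registration instead of const struct nf_hook_ops *, and the hook number and namespace come from the nf_hook_state. A minimal pass-through hook under the new convention; example_hook and example_ops are illustrative names only:

#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>

static unsigned int example_hook(void *priv, struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	/* state->hook replaces ops->hooknum, state->net replaces dev_net(...) */
	pr_debug("hook %u in netns %p\n", state->hook, state->net);

	return NF_ACCEPT;
}

static struct nf_hook_ops example_ops __read_mostly = {
	.hook		= example_hook,
	.pf		= NFPROTO_IPV6,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP6_PRI_FILTER,
};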
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index a46dbf0..abe278b 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -57,8 +57,7 @@
 	/* flowlabel and prio (includes version, which shouldn't change either */
 	flowlabel = *((u_int32_t *)ipv6_hdr(skb));
 
-	ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, state,
-			    state->net->ipv6.ip6table_mangle);
+	ret = ip6t_do_table(skb, state, state->net->ipv6.ip6table_mangle);
 
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
@@ -66,7 +65,7 @@
 	     skb->mark != mark ||
 	     ipv6_hdr(skb)->hop_limit != hop_limit ||
 	     flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
-		err = ip6_route_me_harder(skb);
+		err = ip6_route_me_harder(state->net, skb);
 		if (err < 0)
 			ret = NF_DROP_ERR(err);
 	}
@@ -76,17 +75,16 @@
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip6table_mangle_hook(void *priv, struct sk_buff *skb,
 		     const struct nf_hook_state *state)
 {
-	if (ops->hooknum == NF_INET_LOCAL_OUT)
+	if (state->hook == NF_INET_LOCAL_OUT)
 		return ip6t_mangle_out(skb, state);
-	if (ops->hooknum == NF_INET_POST_ROUTING)
-		return ip6t_do_table(skb, ops->hooknum, state,
+	if (state->hook == NF_INET_POST_ROUTING)
+		return ip6t_do_table(skb, state,
 				     state->net->ipv6.ip6table_mangle);
 	/* INPUT/FORWARD */
-	return ip6t_do_table(skb, ops->hooknum, state,
-			     state->net->ipv6.ip6table_mangle);
+	return ip6t_do_table(skb, state, state->net->ipv6.ip6table_mangle);
 }
 
 static struct nf_hook_ops *mangle_ops __read_mostly;
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index a56451d..abea175 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -30,41 +30,40 @@
 	.af		= NFPROTO_IPV6,
 };
 
-static unsigned int ip6table_nat_do_chain(const struct nf_hook_ops *ops,
+static unsigned int ip6table_nat_do_chain(void *priv,
 					  struct sk_buff *skb,
 					  const struct nf_hook_state *state,
 					  struct nf_conn *ct)
 {
-	return ip6t_do_table(skb, ops->hooknum, state,
-			     state->net->ipv6.ip6table_nat);
+	return ip6t_do_table(skb, state, state->net->ipv6.ip6table_nat);
 }
 
-static unsigned int ip6table_nat_fn(const struct nf_hook_ops *ops,
+static unsigned int ip6table_nat_fn(void *priv,
 				    struct sk_buff *skb,
 				    const struct nf_hook_state *state)
 {
-	return nf_nat_ipv6_fn(ops, skb, state, ip6table_nat_do_chain);
+	return nf_nat_ipv6_fn(priv, skb, state, ip6table_nat_do_chain);
 }
 
-static unsigned int ip6table_nat_in(const struct nf_hook_ops *ops,
+static unsigned int ip6table_nat_in(void *priv,
 				    struct sk_buff *skb,
 				    const struct nf_hook_state *state)
 {
-	return nf_nat_ipv6_in(ops, skb, state, ip6table_nat_do_chain);
+	return nf_nat_ipv6_in(priv, skb, state, ip6table_nat_do_chain);
 }
 
-static unsigned int ip6table_nat_out(const struct nf_hook_ops *ops,
+static unsigned int ip6table_nat_out(void *priv,
 				     struct sk_buff *skb,
 				     const struct nf_hook_state *state)
 {
-	return nf_nat_ipv6_out(ops, skb, state, ip6table_nat_do_chain);
+	return nf_nat_ipv6_out(priv, skb, state, ip6table_nat_do_chain);
 }
 
-static unsigned int ip6table_nat_local_fn(const struct nf_hook_ops *ops,
+static unsigned int ip6table_nat_local_fn(void *priv,
 					  struct sk_buff *skb,
 					  const struct nf_hook_state *state)
 {
-	return nf_nat_ipv6_local_fn(ops, skb, state, ip6table_nat_do_chain);
+	return nf_nat_ipv6_local_fn(priv, skb, state, ip6table_nat_do_chain);
 }
 
 static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 18e831e..9021963 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -19,11 +19,10 @@
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-ip6table_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip6table_raw_hook(void *priv, struct sk_buff *skb,
 		  const struct nf_hook_state *state)
 {
-	return ip6t_do_table(skb, ops->hooknum, state,
-			     state->net->ipv6.ip6table_raw);
+	return ip6t_do_table(skb, state, state->net->ipv6.ip6table_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index 83bc96a..0d856fe 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -36,11 +36,10 @@
 };
 
 static unsigned int
-ip6table_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip6table_security_hook(void *priv, struct sk_buff *skb,
 		       const struct nf_hook_state *state)
 {
-	return ip6t_do_table(skb, ops->hooknum, state,
-			     state->net->ipv6.ip6table_security);
+	return ip6t_do_table(skb, state, state->net->ipv6.ip6table_security);
 }
 
 static struct nf_hook_ops *sectbl_ops __read_mostly;
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 1ef1b79..dd83ad4 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -95,7 +95,7 @@
 	return NF_ACCEPT;
 }
 
-static unsigned int ipv6_helper(const struct nf_hook_ops *ops,
+static unsigned int ipv6_helper(void *priv,
 				struct sk_buff *skb,
 				const struct nf_hook_state *state)
 {
@@ -131,7 +131,7 @@
 	return helper->help(skb, protoff, ct, ctinfo);
 }
 
-static unsigned int ipv6_confirm(const struct nf_hook_ops *ops,
+static unsigned int ipv6_confirm(void *priv,
 				 struct sk_buff *skb,
 				 const struct nf_hook_state *state)
 {
@@ -165,14 +165,14 @@
 	return nf_conntrack_confirm(skb);
 }
 
-static unsigned int ipv6_conntrack_in(const struct nf_hook_ops *ops,
+static unsigned int ipv6_conntrack_in(void *priv,
 				      struct sk_buff *skb,
 				      const struct nf_hook_state *state)
 {
-	return nf_conntrack_in(state->net, PF_INET6, ops->hooknum, skb);
+	return nf_conntrack_in(state->net, PF_INET6, state->hook, skb);
 }
 
-static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops,
+static unsigned int ipv6_conntrack_local(void *priv,
 					 struct sk_buff *skb,
 					 const struct nf_hook_state *state)
 {
@@ -181,7 +181,7 @@
 		net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
 		return NF_ACCEPT;
 	}
-	return nf_conntrack_in(state->net, PF_INET6, ops->hooknum, skb);
+	return nf_conntrack_in(state->net, PF_INET6, state->hook, skb);
 }
 
 static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 0e6fae1..d3b7974 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -36,6 +36,7 @@
 
 static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
 				unsigned int dataoff,
+				struct net *net,
 				struct nf_conntrack_tuple *tuple)
 {
 	const struct icmp6hdr *hp;
@@ -159,7 +160,7 @@
 			       skb_network_offset(skb)
 				+ sizeof(struct ipv6hdr)
 				+ sizeof(struct icmp6hdr),
-			       PF_INET6, &origtuple)) {
+			       PF_INET6, net, &origtuple)) {
 		pr_debug("icmpv6_error: Can't get tuple\n");
 		return -NF_ACCEPT;
 	}
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 701cd2b..2fb86a9 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -563,12 +563,10 @@
 	return 0;
 }
 
-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
+struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 {
 	struct sk_buff *clone;
 	struct net_device *dev = skb->dev;
-	struct net *net = skb_dst(skb) ? dev_net(skb_dst(skb)->dev)
-				       : dev_net(skb->dev);
 	struct frag_hdr *fhdr;
 	struct frag_queue *fq;
 	struct ipv6hdr *hdr;
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 6b576be..5173a89 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -51,7 +51,7 @@
 		return IP6_DEFRAG_CONNTRACK_OUT + zone_id;
 }
 
-static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
+static unsigned int ipv6_defrag(void *priv,
 				struct sk_buff *skb,
 				const struct nf_hook_state *state)
 {
@@ -63,7 +63,8 @@
 		return NF_ACCEPT;
 #endif
 
-	reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(ops->hooknum, skb));
+	reasm = nf_ct_frag6_gather(state->net, skb,
+				   nf_ct6_defrag_user(state->hook, skb));
 	/* queued */
 	if (reasm == NULL)
 		return NF_STOLEN;
@@ -74,7 +75,7 @@
 
 	nf_ct_frag6_consume_orig(reasm);
 
-	NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, state->net, state->sk, reasm,
+	NF_HOOK_THRESH(NFPROTO_IPV6, state->hook, state->net, state->sk, reasm,
 		       state->in, state->out,
 		       state->okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
 
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
index c8ab626..6989c70 100644
--- a/net/ipv6/netfilter/nf_dup_ipv6.c
+++ b/net/ipv6/netfilter/nf_dup_ipv6.c
@@ -19,25 +19,10 @@
 #include <net/netfilter/nf_conntrack.h>
 #endif
 
-static struct net *pick_net(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_NS
-	const struct dst_entry *dst;
-
-	if (skb->dev != NULL)
-		return dev_net(skb->dev);
-	dst = skb_dst(skb);
-	if (dst != NULL && dst->dev != NULL)
-		return dev_net(dst->dev);
-#endif
-	return &init_net;
-}
-
-static bool nf_dup_ipv6_route(struct sk_buff *skb, const struct in6_addr *gw,
-			      int oif)
+static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
+			      const struct in6_addr *gw, int oif)
 {
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
-	struct net *net = pick_net(skb);
 	struct dst_entry *dst;
 	struct flowi6 fl6;
 
@@ -61,7 +46,7 @@
 	return true;
 }
 
-void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
 		 const struct in6_addr *gw, int oif)
 {
 	if (this_cpu_read(nf_skb_duplicated))
@@ -81,9 +66,9 @@
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 		--iph->hop_limit;
 	}
-	if (nf_dup_ipv6_route(skb, gw, oif)) {
+	if (nf_dup_ipv6_route(net, skb, gw, oif)) {
 		__this_cpu_write(nf_skb_duplicated, true);
-		ip6_local_out(skb);
+		ip6_local_out(net, skb->sk, skb);
 		__this_cpu_write(nf_skb_duplicated, false);
 	} else {
 		kfree_skb(skb);
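
Note: the pick_net() heuristic (use skb->dev or the dst device, falling back to &init_net) is removed; nf_dup_ipv6() and its internal route helper now take the namespace from the caller, and the duplicated packet is sent with the three-argument ip6_local_out(). The exported signature, taken from the hunk above:

void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
		 const struct in6_addr *gw, int oif);

Callers such as the nft dup expression later in this patch pass pkt->net and pkt->hook straight through.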
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index 70fbaed..238e70c 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -262,9 +262,9 @@
 EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);
 
 unsigned int
-nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
 	       const struct nf_hook_state *state,
-	       unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+	       unsigned int (*do_chain)(void *priv,
 					struct sk_buff *skb,
 					const struct nf_hook_state *state,
 					struct nf_conn *ct))
@@ -272,7 +272,7 @@
 	struct nf_conn *ct;
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn_nat *nat;
-	enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
+	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
 	__be16 frag_off;
 	int hdrlen;
 	u8 nexthdr;
@@ -303,7 +303,7 @@
 
 		if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
 			if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
-							     ops->hooknum,
+							     state->hook,
 							     hdrlen))
 				return NF_DROP;
 			else
@@ -317,21 +317,21 @@
 		if (!nf_nat_initialized(ct, maniptype)) {
 			unsigned int ret;
 
-			ret = do_chain(ops, skb, state, ct);
+			ret = do_chain(priv, skb, state, ct);
 			if (ret != NF_ACCEPT)
 				return ret;
 
-			if (nf_nat_initialized(ct, HOOK2MANIP(ops->hooknum)))
+			if (nf_nat_initialized(ct, HOOK2MANIP(state->hook)))
 				break;
 
-			ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
+			ret = nf_nat_alloc_null_binding(ct, state->hook);
 			if (ret != NF_ACCEPT)
 				return ret;
 		} else {
 			pr_debug("Already setup manip %s for ct %p\n",
 				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
 				 ct);
-			if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
+			if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
 				goto oif_changed;
 		}
 		break;
@@ -340,11 +340,11 @@
 		/* ESTABLISHED */
 		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
 			     ctinfo == IP_CT_ESTABLISHED_REPLY);
-		if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
+		if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
 			goto oif_changed;
 	}
 
-	return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+	return nf_nat_packet(ct, ctinfo, state->hook, skb);
 
 oif_changed:
 	nf_ct_kill_acct(ct, ctinfo, skb);
@@ -353,9 +353,9 @@
 EXPORT_SYMBOL_GPL(nf_nat_ipv6_fn);
 
 unsigned int
-nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+nf_nat_ipv6_in(void *priv, struct sk_buff *skb,
 	       const struct nf_hook_state *state,
-	       unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+	       unsigned int (*do_chain)(void *priv,
 					struct sk_buff *skb,
 					const struct nf_hook_state *state,
 					struct nf_conn *ct))
@@ -363,7 +363,7 @@
 	unsigned int ret;
 	struct in6_addr daddr = ipv6_hdr(skb)->daddr;
 
-	ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
+	ret = nf_nat_ipv6_fn(priv, skb, state, do_chain);
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
 		skb_dst_drop(skb);
@@ -373,9 +373,9 @@
 EXPORT_SYMBOL_GPL(nf_nat_ipv6_in);
 
 unsigned int
-nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
 		const struct nf_hook_state *state,
-		unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+		unsigned int (*do_chain)(void *priv,
 					 struct sk_buff *skb,
 					 const struct nf_hook_state *state,
 					 struct nf_conn *ct))
@@ -391,7 +391,7 @@
 	if (skb->len < sizeof(struct ipv6hdr))
 		return NF_ACCEPT;
 
-	ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
+	ret = nf_nat_ipv6_fn(priv, skb, state, do_chain);
 #ifdef CONFIG_XFRM
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
@@ -403,7 +403,7 @@
 		    (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
 		     ct->tuplehash[dir].tuple.src.u.all !=
 		     ct->tuplehash[!dir].tuple.dst.u.all)) {
-			err = nf_xfrm_me_harder(skb, AF_INET6);
+			err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
 			if (err < 0)
 				ret = NF_DROP_ERR(err);
 		}
@@ -414,9 +414,9 @@
 EXPORT_SYMBOL_GPL(nf_nat_ipv6_out);
 
 unsigned int
-nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
 		     const struct nf_hook_state *state,
-		     unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+		     unsigned int (*do_chain)(void *priv,
 					      struct sk_buff *skb,
 					      const struct nf_hook_state *state,
 					      struct nf_conn *ct))
@@ -430,14 +430,14 @@
 	if (skb->len < sizeof(struct ipv6hdr))
 		return NF_ACCEPT;
 
-	ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
+	ret = nf_nat_ipv6_fn(priv, skb, state, do_chain);
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
 		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
 
 		if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
 				      &ct->tuplehash[!dir].tuple.src.u3)) {
-			err = ip6_route_me_harder(skb);
+			err = ip6_route_me_harder(state->net, skb);
 			if (err < 0)
 				ret = NF_DROP_ERR(err);
 		}
@@ -446,7 +446,7 @@
 			 ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
 			 ct->tuplehash[dir].tuple.dst.u.all !=
 			 ct->tuplehash[!dir].tuple.src.u.all) {
-			err = nf_xfrm_me_harder(skb, AF_INET6);
+			err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
 			if (err < 0)
 				ret = NF_DROP_ERR(err);
 		}
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
index 7745609..31ba7ca 100644
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
@@ -34,7 +34,7 @@
 	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
 			    ctinfo == IP_CT_RELATED_REPLY));
 
-	if (ipv6_dev_get_saddr(dev_net(out), out,
+	if (ipv6_dev_get_saddr(nf_ct_net(ct), out,
 			       &ipv6_hdr(skb)->daddr, 0, &src) < 0)
 		return NF_DROP;
 
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 94b4c6d..7309e47 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -206,7 +206,7 @@
 		dev_queue_xmit(nskb);
 	} else
 #endif
-		ip6_local_out(nskb);
+		ip6_local_out(net, nskb->sk, nskb);
 }
 EXPORT_SYMBOL_GPL(nf_send_reset6);
 
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c
index c8148ba..120ea91 100644
--- a/net/ipv6/netfilter/nf_tables_ipv6.c
+++ b/net/ipv6/netfilter/nf_tables_ipv6.c
@@ -16,20 +16,20 @@
 #include <net/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables_ipv6.h>
 
-static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops,
+static unsigned int nft_do_chain_ipv6(void *priv,
 				      struct sk_buff *skb,
 				      const struct nf_hook_state *state)
 {
 	struct nft_pktinfo pkt;
 
 	/* malformed packet, drop it */
-	if (nft_set_pktinfo_ipv6(&pkt, ops, skb, state) < 0)
+	if (nft_set_pktinfo_ipv6(&pkt, skb, state) < 0)
 		return NF_DROP;
 
-	return nft_do_chain(&pkt, ops);
+	return nft_do_chain(&pkt, priv);
 }
 
-static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
+static unsigned int nft_ipv6_output(void *priv,
 				    struct sk_buff *skb,
 				    const struct nf_hook_state *state)
 {
@@ -40,7 +40,7 @@
 		return NF_ACCEPT;
 	}
 
-	return nft_do_chain_ipv6(ops, skb, state);
+	return nft_do_chain_ipv6(priv, skb, state);
 }
 
 struct nft_af_info nft_af_ipv6 __read_mostly = {
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
index 951bb45..443cd30 100644
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -24,44 +24,44 @@
 #include <net/netfilter/nf_nat_l3proto.h>
 #include <net/ipv6.h>
 
-static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops,
+static unsigned int nft_nat_do_chain(void *priv,
 				     struct sk_buff *skb,
 				     const struct nf_hook_state *state,
 				     struct nf_conn *ct)
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo_ipv6(&pkt, ops, skb, state);
+	nft_set_pktinfo_ipv6(&pkt, skb, state);
 
-	return nft_do_chain(&pkt, ops);
+	return nft_do_chain(&pkt, priv);
 }
 
-static unsigned int nft_nat_ipv6_fn(const struct nf_hook_ops *ops,
+static unsigned int nft_nat_ipv6_fn(void *priv,
 				    struct sk_buff *skb,
 				    const struct nf_hook_state *state)
 {
-	return nf_nat_ipv6_fn(ops, skb, state, nft_nat_do_chain);
+	return nf_nat_ipv6_fn(priv, skb, state, nft_nat_do_chain);
 }
 
-static unsigned int nft_nat_ipv6_in(const struct nf_hook_ops *ops,
+static unsigned int nft_nat_ipv6_in(void *priv,
 				    struct sk_buff *skb,
 				    const struct nf_hook_state *state)
 {
-	return nf_nat_ipv6_in(ops, skb, state, nft_nat_do_chain);
+	return nf_nat_ipv6_in(priv, skb, state, nft_nat_do_chain);
 }
 
-static unsigned int nft_nat_ipv6_out(const struct nf_hook_ops *ops,
+static unsigned int nft_nat_ipv6_out(void *priv,
 				     struct sk_buff *skb,
 				     const struct nf_hook_state *state)
 {
-	return nf_nat_ipv6_out(ops, skb, state, nft_nat_do_chain);
+	return nf_nat_ipv6_out(priv, skb, state, nft_nat_do_chain);
 }
 
-static unsigned int nft_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
+static unsigned int nft_nat_ipv6_local_fn(void *priv,
 					  struct sk_buff *skb,
 					  const struct nf_hook_state *state)
 {
-	return nf_nat_ipv6_local_fn(ops, skb, state, nft_nat_do_chain);
+	return nf_nat_ipv6_local_fn(priv, skb, state, nft_nat_do_chain);
 }
 
 static const struct nf_chain_type nft_chain_nat_ipv6 = {
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
index 0dafdaa..9df75bd 100644
--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -22,7 +22,7 @@
 #include <net/netfilter/nf_tables_ipv6.h>
 #include <net/route.h>
 
-static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
+static unsigned int nf_route_table_hook(void *priv,
 					struct sk_buff *skb,
 					const struct nf_hook_state *state)
 {
@@ -33,7 +33,7 @@
 	u32 mark, flowlabel;
 
 	/* malformed packet, drop it */
-	if (nft_set_pktinfo_ipv6(&pkt, ops, skb, state) < 0)
+	if (nft_set_pktinfo_ipv6(&pkt, skb, state) < 0)
 		return NF_DROP;
 
 	/* save source/dest address, mark, hoplimit, flowlabel, priority */
@@ -45,14 +45,14 @@
 	/* flowlabel and prio (includes version, which shouldn't change either */
 	flowlabel = *((u32 *)ipv6_hdr(skb));
 
-	ret = nft_do_chain(&pkt, ops);
+	ret = nft_do_chain(&pkt, priv);
 	if (ret != NF_DROP && ret != NF_QUEUE &&
 	    (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
 	     memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
 	     skb->mark != mark ||
 	     ipv6_hdr(skb)->hop_limit != hop_limit ||
 	     flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
-		return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP;
+		return ip6_route_me_harder(state->net, skb) == 0 ? ret : NF_DROP;
 
 	return ret;
 }
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
index 0eaa4f6..8bfd470 100644
--- a/net/ipv6/netfilter/nft_dup_ipv6.c
+++ b/net/ipv6/netfilter/nft_dup_ipv6.c
@@ -28,7 +28,7 @@
 	struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr];
 	int oif = regs->data[priv->sreg_dev];
 
-	nf_dup_ipv6(pkt->skb, pkt->ops->hooknum, gw, oif);
+	nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif);
 }
 
 static int nft_dup_ipv6_init(const struct nft_ctx *ctx,
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
index effd393..aca44e8 100644
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
@@ -35,8 +35,7 @@
 
 	range.flags |= priv->flags;
 
-	regs->verdict.code = nf_nat_redirect_ipv6(pkt->skb, &range,
-						  pkt->ops->hooknum);
+	regs->verdict.code = nf_nat_redirect_ipv6(pkt->skb, &range, pkt->hook);
 }
 
 static struct nft_expr_type nft_redir_ipv6_type;
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
index d0d1540..533cd57 100644
--- a/net/ipv6/netfilter/nft_reject_ipv6.c
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -24,15 +24,14 @@
 				 const struct nft_pktinfo *pkt)
 {
 	struct nft_reject *priv = nft_expr_priv(expr);
-	struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
 
 	switch (priv->type) {
 	case NFT_REJECT_ICMP_UNREACH:
-		nf_send_unreach6(net, pkt->skb, priv->icmp_code,
-				 pkt->ops->hooknum);
+		nf_send_unreach6(pkt->net, pkt->skb, priv->icmp_code,
+				 pkt->hook);
 		break;
 	case NFT_REJECT_TCP_RST:
-		nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
+		nf_send_reset6(pkt->net, pkt->skb, pkt->hook);
 		break;
 	default:
 		break;
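
Note: for nf_tables, struct nft_pktinfo now carries the namespace and hook number directly (pkt->net, pkt->hook), so expressions stop doing dev_net(pkt->in ? pkt->in : pkt->out) and pkt->ops->hooknum, and nft_set_pktinfo_ipv6() no longer needs the ops argument. A hypothetical expression eval just to show the fields; example_expr_eval is not part of the patch:

#include <linux/kernel.h>
#include <net/netfilter/nf_tables.h>

static void example_expr_eval(const struct nft_expr *expr,
			      struct nft_regs *regs,
			      const struct nft_pktinfo *pkt)
{
	/* pkt->net replaces the dev_net() dance */
	pr_debug("eval in netns %p\n", pkt->net);

	/* pkt->hook replaces pkt->ops->hooknum */
	if (pkt->hook == NF_INET_FORWARD)
		regs->verdict.code = NF_DROP;
}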
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index e77102c..462f2a76b 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -138,9 +138,8 @@
 EXPORT_SYMBOL(ip6_dst_hoplimit);
 #endif
 
-static int __ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
+int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
 	int len;
 
 	len = skb->len - sizeof(struct ipv6hdr);
@@ -151,29 +150,18 @@
 
 	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 		       net, sk, skb, NULL, skb_dst(skb)->dev,
-		       dst_output_okfn);
-}
-
-int __ip6_local_out(struct sk_buff *skb)
-{
-	return __ip6_local_out_sk(skb->sk, skb);
+		       dst_output);
 }
 EXPORT_SYMBOL_GPL(__ip6_local_out);
 
-int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
+int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	int err;
 
-	err = __ip6_local_out_sk(sk, skb);
+	err = __ip6_local_out(net, sk, skb);
 	if (likely(err == 1))
-		err = dst_output(sk, skb);
+		err = dst_output(net, sk, skb);
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(ip6_local_out_sk);
-
-int ip6_local_out(struct sk_buff *skb)
-{
-	return ip6_local_out_sk(skb->sk, skb);
-}
 EXPORT_SYMBOL_GPL(ip6_local_out);
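
Note: the ip6_local_out() family is consolidated here: the plain-skb and sk-only wrappers (ip6_local_out_sk(), the old __ip6_local_out(skb)) are removed, and both remaining functions take the namespace explicitly, matching the new dst_output(net, sk, skb) convention used as the LOCAL_OUT okfn. Prototypes as they stand after this hunk:

int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

Callers elsewhere in the patch (SYNPROXY, nf_reject_ipv6, nf_dup_ipv6) accordingly switch to ip6_local_out(net, nskb->sk, nskb).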
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index fec0151..dc65ec1 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -655,7 +655,7 @@
 
 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
-		      NULL, rt->dst.dev, dst_output_okfn);
+		      NULL, rt->dst.dev, dst_output);
 	if (err > 0)
 		err = net_xmit_errno(err);
 	if (err)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 53617d7..db5b54a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -86,9 +86,9 @@
 static int		 ip6_dst_gc(struct dst_ops *ops);
 
 static int		ip6_pkt_discard(struct sk_buff *skb);
-static int		ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb);
+static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 static int		ip6_pkt_prohibit(struct sk_buff *skb);
-static int		ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb);
+static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 static void		ip6_link_failure(struct sk_buff *skb);
 static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
 					   struct sk_buff *skb, u32 mtu);
@@ -308,7 +308,7 @@
 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
 		.error		= -EINVAL,
 		.input		= dst_discard,
-		.output		= dst_discard_sk,
+		.output		= dst_discard_out,
 	},
 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
 	.rt6i_protocol  = RTPROT_KERNEL,
@@ -421,31 +421,7 @@
 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
 			       const struct flowi6 *fl6)
 {
-	unsigned int val = fl6->flowi6_proto;
-
-	val ^= ipv6_addr_hash(&fl6->daddr);
-	val ^= ipv6_addr_hash(&fl6->saddr);
-
-	/* Work only if this not encapsulated */
-	switch (fl6->flowi6_proto) {
-	case IPPROTO_UDP:
-	case IPPROTO_TCP:
-	case IPPROTO_SCTP:
-		val ^= (__force u16)fl6->fl6_sport;
-		val ^= (__force u16)fl6->fl6_dport;
-		break;
-
-	case IPPROTO_ICMPV6:
-		val ^= (__force u16)fl6->fl6_icmp_type;
-		val ^= (__force u16)fl6->fl6_icmp_code;
-		break;
-	}
-	/* RFC6438 recommands to use flowlabel */
-	val ^= (__force u32)fl6->flowlabel;
-
-	/* Perhaps, we need to tune, this function? */
-	val = val ^ (val >> 7) ^ (val >> 12);
-	return val % candidate_count;
+	return get_hash_from_flowi6(fl6) % candidate_count;
 }
 
 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
@@ -498,10 +474,10 @@
 			if (dev->flags & IFF_LOOPBACK) {
 				if (!sprt->rt6i_idev ||
 				    sprt->rt6i_idev->dev->ifindex != oif) {
-					if (flags & RT6_LOOKUP_F_IFACE && oif)
+					if (flags & RT6_LOOKUP_F_IFACE)
 						continue;
-					if (local && (!oif ||
-						      local->rt6i_idev->dev->ifindex == oif))
+					if (local &&
+					    local->rt6i_idev->dev->ifindex == oif)
 						continue;
 				}
 				local = sprt;
@@ -538,7 +514,7 @@
 		container_of(w, struct __rt6_probe_work, work);
 
 	addrconf_addr_solict_mult(&work->target, &mcaddr);
-	ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
+	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, NULL);
 	dev_put(work->dev);
 	kfree(work);
 }
@@ -1193,7 +1169,8 @@
 
 	fl6->flowi6_iif = LOOPBACK_IFINDEX;
 
-	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
+	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
+	    fl6->flowi6_oif)
 		flags |= RT6_LOOKUP_F_IFACE;
 
 	if (!ipv6_addr_any(&fl6->saddr))
@@ -1218,7 +1195,7 @@
 
 		new->__use = 1;
 		new->input = dst_discard;
-		new->output = dst_discard_sk;
+		new->output = dst_discard_out;
 
 		if (dst_metrics_read_only(&ort->dst))
 			new->_metrics = ort->dst._metrics;
@@ -1322,8 +1299,7 @@
 	if (rt) {
 		if (rt->rt6i_flags & RTF_CACHE) {
 			dst_hold(&rt->dst);
-			if (ip6_del_rt(rt))
-				dst_free(&rt->dst);
+			ip6_del_rt(rt);
 		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
 			rt->rt6i_node->fn_sernum = -1;
 		}
@@ -1748,21 +1724,21 @@
 	return -EINVAL;
 }
 
-int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
+static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
 {
-	int err;
 	struct net *net = cfg->fc_nlinfo.nl_net;
 	struct rt6_info *rt = NULL;
 	struct net_device *dev = NULL;
 	struct inet6_dev *idev = NULL;
 	struct fib6_table *table;
 	int addr_type;
+	int err = -EINVAL;
 
 	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
-		return -EINVAL;
+		goto out;
 #ifndef CONFIG_IPV6_SUBTREES
 	if (cfg->fc_src_len)
-		return -EINVAL;
+		goto out;
 #endif
 	if (cfg->fc_ifindex) {
 		err = -ENODEV;
@@ -1877,7 +1853,7 @@
 		switch (cfg->fc_type) {
 		case RTN_BLACKHOLE:
 			rt->dst.error = -EINVAL;
-			rt->dst.output = dst_discard_sk;
+			rt->dst.output = dst_discard_out;
 			rt->dst.input = dst_discard;
 			break;
 		case RTN_PROHIBIT:
@@ -1886,9 +1862,11 @@
 			rt->dst.input = ip6_pkt_prohibit;
 			break;
 		case RTN_THROW:
+		case RTN_UNREACHABLE:
 		default:
 			rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
-					: -ENETUNREACH;
+					: (cfg->fc_type == RTN_UNREACHABLE)
+					? -EHOSTUNREACH : -ENETUNREACH;
 			rt->dst.output = ip6_pkt_discard_out;
 			rt->dst.input = ip6_pkt_discard;
 			break;
@@ -1980,9 +1958,7 @@
 
 	cfg->fc_nlinfo.nl_net = dev_net(dev);
 
-	*rt_ret = rt;
-
-	return 0;
+	return rt;
 out:
 	if (dev)
 		dev_put(dev);
@@ -1991,20 +1967,21 @@
 	if (rt)
 		dst_free(&rt->dst);
 
-	*rt_ret = NULL;
-
-	return err;
+	return ERR_PTR(err);
 }
 
 int ip6_route_add(struct fib6_config *cfg)
 {
 	struct mx6_config mxc = { .mx = NULL, };
-	struct rt6_info *rt = NULL;
+	struct rt6_info *rt;
 	int err;
 
-	err = ip6_route_info_create(cfg, &rt);
-	if (err)
+	rt = ip6_route_info_create(cfg);
+	if (IS_ERR(rt)) {
+		err = PTR_ERR(rt);
+		rt = NULL;
 		goto out;
+	}
 
 	err = ip6_convert_metrics(&mxc, cfg);
 	if (err)
@@ -2028,7 +2005,8 @@
 	struct fib6_table *table;
 	struct net *net = dev_net(rt->dst.dev);
 
-	if (rt == net->ipv6.ip6_null_entry) {
+	if (rt == net->ipv6.ip6_null_entry ||
+	    rt->dst.flags & DST_NOCACHE) {
 		err = -ENOENT;
 		goto out;
 	}
@@ -2467,7 +2445,7 @@
 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
 }
 
-static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb)
+static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	skb->dev = skb_dst(skb)->dev;
 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
@@ -2478,7 +2456,7 @@
 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
 }
 
-static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb)
+static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	skb->dev = skb_dst(skb)->dev;
 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
@@ -2515,6 +2493,7 @@
 	rt->rt6i_dst.addr = *addr;
 	rt->rt6i_dst.plen = 128;
 	rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
+	rt->dst.flags |= DST_NOCACHE;
 
 	atomic_set(&rt->dst.__refcnt, 1);
 
@@ -2891,9 +2870,12 @@
 				r_cfg.fc_encap_type = nla_get_u16(nla);
 		}
 
-		err = ip6_route_info_create(&r_cfg, &rt);
-		if (err)
+		rt = ip6_route_info_create(&r_cfg);
+		if (IS_ERR(rt)) {
+			err = PTR_ERR(rt);
+			rt = NULL;
 			goto cleanup;
+		}
 
 		err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
 		if (err) {
@@ -3303,7 +3285,8 @@
 	return err;
 }
 
-void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
+void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
+		     unsigned int nlm_flags)
 {
 	struct sk_buff *skb;
 	struct net *net = info->nl_net;
@@ -3318,7 +3301,7 @@
 		goto errout;
 
 	err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
-				event, info->portid, seq, 0, 0, 0);
+				event, info->portid, seq, 0, 0, nlm_flags);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);
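
Note: the route.c changes bundle several things: ip6_route_info_create() becomes static and returns a struct rt6_info * or ERR_PTR() instead of filling an output parameter (ip6_route_add() and the multipath path convert to IS_ERR()/PTR_ERR()); the multipath hash helper is reduced to get_hash_from_flowi6(); a non-zero flowi6_oif now forces RT6_LOOKUP_F_IFACE on output lookups; RTN_UNREACHABLE routes report -EHOSTUNREACH rather than -ENETUNREACH; local host routes are marked DST_NOCACHE and route deletion bails out on such entries; and inet6_rt_notify() grows an nlm_flags argument. The two changed signatures, taken from the hunks above:

static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg);
void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
		     unsigned int nlm_flags);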
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 0909f4e..bb8f2fa 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -114,14 +114,11 @@
 }
 EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);
 
-__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp)
+__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mssp)
 {
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
 
-	tcp_synq_overflow(sk);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
-
 	return __cookie_v6_init_sequence(iph, th, mssp);
 }
 
@@ -173,7 +170,7 @@
 		goto out;
 
 	ret = NULL;
-	req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk);
+	req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk, false);
 	if (!req)
 		goto out;
 
@@ -210,7 +207,7 @@
 	ireq->wscale_ok		= tcp_opt.wscale_ok;
 	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
 	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-	treq->snt_synack	= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
+	treq->snt_synack.v64	= 0;
 	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 
@@ -238,9 +235,9 @@
 			goto out_free;
 	}
 
-	req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
+	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
 	tcp_select_initial_window(tcp_full_space(sk), req->mss,
-				  &req->rcv_wnd, &req->window_clamp,
+				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
 				  ireq->wscale_ok, &rcv_wscale,
 				  dst_metric(dst, RTAX_INITRWND));
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f9c0e26..2887c84 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -70,8 +70,8 @@
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>
 
-static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
-static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
+static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 				      struct request_sock *req);
 
 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
@@ -82,7 +82,7 @@
 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
 #else
-static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
+static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 						   const struct in6_addr *addr)
 {
 	return NULL;
@@ -434,11 +434,12 @@
 }
 
 
-static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
+static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 			      struct flowi *fl,
 			      struct request_sock *req,
 			      u16 queue_mapping,
-			      struct tcp_fastopen_cookie *foc)
+			      struct tcp_fastopen_cookie *foc,
+			      bool attach_req)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -447,10 +448,11 @@
 	int err = -ENOMEM;
 
 	/* First, grab a route. */
-	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
+	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
+					       IPPROTO_TCP)) == NULL)
 		goto done;
 
-	skb = tcp_make_synack(sk, dst, req, foc);
+	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
 
 	if (skb) {
 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
@@ -476,13 +478,13 @@
 }
 
 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
+static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 						   const struct in6_addr *addr)
 {
 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 }
 
-static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
+static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
 						const struct sock *addr_sk)
 {
 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
@@ -621,8 +623,12 @@
 	return 1;
 }
 
-static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+#endif
+
+static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
+				    const struct sk_buff *skb)
 {
+#ifdef CONFIG_TCP_MD5SIG
 	const __u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
@@ -659,26 +665,27 @@
 				     &ip6h->daddr, ntohs(th->dest));
 		return true;
 	}
+#endif
 	return false;
 }
-#endif
 
-static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
+static void tcp_v6_init_req(struct request_sock *req,
+			    const struct sock *sk_listener,
 			    struct sk_buff *skb)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
-	struct ipv6_pinfo *np = inet6_sk(sk);
+	const struct ipv6_pinfo *np = inet6_sk(sk_listener);
 
 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 
 	/* So that link locals have meaning */
-	if (!sk->sk_bound_dev_if &&
+	if (!sk_listener->sk_bound_dev_if &&
 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 		ireq->ir_iif = tcp_v6_iif(skb);
 
 	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
-	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
+	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
 	     np->rxopt.bits.rxinfo ||
 	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 	     np->rxopt.bits.rxohlim || np->repflow)) {
@@ -687,13 +694,14 @@
 	}
 }
 
-static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
+static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
+					  struct flowi *fl,
 					  const struct request_sock *req,
 					  bool *strict)
 {
 	if (strict)
 		*strict = true;
-	return inet6_csk_route_req(sk, &fl->u.ip6, req);
+	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 }
 
 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
@@ -720,10 +728,9 @@
 	.route_req	=	tcp_v6_route_req,
 	.init_seq	=	tcp_v6_init_sequence,
 	.send_synack	=	tcp_v6_send_synack,
-	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
 };
 
-static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
+static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 				 int oif, struct tcp_md5sig_key *key, int rst,
 				 u8 tclass, u32 label)
@@ -822,7 +829,7 @@
 	kfree_skb(buff);
 }
 
-static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
+static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	u32 seq = 0, ack_seq = 0;
@@ -893,7 +900,7 @@
 #endif
 }
 
-static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
+static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 			    struct tcp_md5sig_key *key, u8 tclass,
 			    u32 label)
@@ -916,7 +923,7 @@
 	inet_twsk_put(tw);
 }
 
-static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
@@ -924,44 +931,18 @@
 	 */
 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
-			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 			0, 0);
 }
 
 
-static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
+static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
 {
-	const struct tcphdr *th = tcp_hdr(skb);
-	struct request_sock *req;
-	struct sock *nsk;
-
-	/* Find possible connection requests. */
-	req = inet6_csk_search_req(sk, th->source,
-				   &ipv6_hdr(skb)->saddr,
-				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
-	if (req) {
-		nsk = tcp_check_req(sk, skb, req, false);
-		if (!nsk || nsk == sk)
-			reqsk_put(req);
-		return nsk;
-	}
-	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
-					 &ipv6_hdr(skb)->saddr, th->source,
-					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
-					 tcp_v6_iif(skb));
-
-	if (nsk) {
-		if (nsk->sk_state != TCP_TIME_WAIT) {
-			bh_lock_sock(nsk);
-			return nsk;
-		}
-		inet_twsk_put(inet_twsk(nsk));
-		return NULL;
-	}
-
 #ifdef CONFIG_SYN_COOKIES
+	const struct tcphdr *th = tcp_hdr(skb);
+
 	if (!th->syn)
 		sk = cookie_v6_check(sk, skb);
 #endif
@@ -984,12 +965,13 @@
 	return 0; /* don't send reset */
 }
 
-static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 					 struct request_sock *req,
 					 struct dst_entry *dst)
 {
 	struct inet_request_sock *ireq;
-	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+	struct ipv6_pinfo *newnp;
+	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp6_sock *newtcp6sk;
 	struct inet_sock *newinet;
 	struct tcp_sock *newtp;
@@ -1057,7 +1039,7 @@
 		goto out_overflow;
 
 	if (!dst) {
-		dst = inet6_csk_route_req(sk, &fl6, req);
+		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
 		if (!dst)
 			goto out;
 	}
@@ -1090,8 +1072,6 @@
 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
 	newsk->sk_bound_dev_if = ireq->ir_iif;
 
-	newsk->sk_txhash = tcp_rsk(req)->txhash;
-
 	/* Now IPv6 options...
 
 	   First: no IPv4 options.
@@ -1181,7 +1161,7 @@
 }
 
 /* The socket must have it's spinlock held when we get
- * here.
+ * here, unless it is a TCP_LISTEN socket.
  *
  * We have a potential double-lock case here, so even when
  * doing backlog processing we use the BH locking scheme.
@@ -1252,18 +1232,14 @@
 		goto csum_err;
 
 	if (sk->sk_state == TCP_LISTEN) {
-		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
+		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
+
 		if (!nsk)
 			goto discard;
 
-		/*
-		 * Queue it on the new socket if the new socket is active,
-		 * otherwise we just shortcircuit this and continue with
-		 * the new socket..
-		 */
 		if (nsk != sk) {
 			sock_rps_save_rxhash(nsk, skb);
-			sk_mark_napi_id(sk, skb);
+			sk_mark_napi_id(nsk, skb);
 			if (tcp_child_process(sk, nsk, skb))
 				goto reset;
 			if (opt_skb)
@@ -1273,7 +1249,7 @@
 	} else
 		sock_rps_save_rxhash(sk, skb);
 
-	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
+	if (tcp_rcv_state_process(sk, skb))
 		goto reset;
 	if (opt_skb)
 		goto ipv6_pktoptions;
@@ -1396,6 +1372,33 @@
 	if (sk->sk_state == TCP_TIME_WAIT)
 		goto do_time_wait;
 
+	if (sk->sk_state == TCP_NEW_SYN_RECV) {
+		struct request_sock *req = inet_reqsk(sk);
+		struct sock *nsk = NULL;
+
+		sk = req->rsk_listener;
+		tcp_v6_fill_cb(skb, hdr, th);
+		if (tcp_v6_inbound_md5_hash(sk, skb)) {
+			reqsk_put(req);
+			goto discard_it;
+		}
+		if (sk->sk_state == TCP_LISTEN)
+			nsk = tcp_check_req(sk, skb, req, false);
+		if (!nsk) {
+			reqsk_put(req);
+			goto discard_it;
+		}
+		if (nsk == sk) {
+			sock_hold(sk);
+			reqsk_put(req);
+			tcp_v6_restore_cb(skb);
+		} else if (tcp_child_process(sk, nsk, skb)) {
+			tcp_v6_send_reset(nsk, skb);
+			goto discard_it;
+		} else {
+			return 0;
+		}
+	}
 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
@@ -1406,17 +1409,21 @@
 
 	tcp_v6_fill_cb(skb, hdr, th);
 
-#ifdef CONFIG_TCP_MD5SIG
 	if (tcp_v6_inbound_md5_hash(sk, skb))
 		goto discard_and_relse;
-#endif
 
 	if (sk_filter(sk, skb))
 		goto discard_and_relse;
 
-	sk_incoming_cpu_update(sk);
 	skb->dev = NULL;
 
+	if (sk->sk_state == TCP_LISTEN) {
+		ret = tcp_v6_do_rcv(sk, skb);
+		goto put_and_return;
+	}
+
+	sk_incoming_cpu_update(sk);
+
 	bh_lock_sock_nested(sk);
 	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
 	ret = 0;
@@ -1431,6 +1438,7 @@
 	}
 	bh_unlock_sock(sk);
 
+put_and_return:
 	sock_put(sk);
 	return ret ? -1 : 0;
 
@@ -1631,7 +1639,7 @@
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCPv6 sock list dumping. */
 static void get_openreq6(struct seq_file *seq,
-			 struct request_sock *req, int i, kuid_t uid)
+			 const struct request_sock *req, int i)
 {
 	long ttd = req->rsk_timer.expires - jiffies;
 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
@@ -1655,7 +1663,8 @@
 		   1,   /* timers active (only the expire timer) */
 		   jiffies_to_clock_t(ttd),
 		   req->num_timeout,
-		   from_kuid_munged(seq_user_ns(seq), uid),
+		   from_kuid_munged(seq_user_ns(seq),
+				    sock_i_uid(req->rsk_listener)),
 		   0,  /* non standard timer */
 		   0, /* open_requests have no inode */
 		   0, req);
@@ -1670,7 +1679,7 @@
 	const struct inet_sock *inet = inet_sk(sp);
 	const struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
-	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
+	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
 
 	dest  = &sp->sk_v6_daddr;
 	src   = &sp->sk_v6_rcv_saddr;
@@ -1714,7 +1723,7 @@
 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		   tp->snd_cwnd,
 		   sp->sk_state == TCP_LISTEN ?
-			(fastopenq ? fastopenq->max_qlen : 0) :
+			fastopenq->max_qlen :
 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
 		   );
 }
@@ -1760,18 +1769,12 @@
 	}
 	st = seq->private;
 
-	switch (st->state) {
-	case TCP_SEQ_STATE_LISTENING:
-	case TCP_SEQ_STATE_ESTABLISHED:
-		if (sk->sk_state == TCP_TIME_WAIT)
-			get_timewait6_sock(seq, v, st->num);
-		else
-			get_tcp6_sock(seq, v, st->num);
-		break;
-	case TCP_SEQ_STATE_OPENREQ:
-		get_openreq6(seq, v, st->num, st->uid);
-		break;
-	}
+	if (sk->sk_state == TCP_TIME_WAIT)
+		get_timewait6_sock(seq, v, st->num);
+	else if (sk->sk_state == TCP_NEW_SYN_RECV)
+		get_openreq6(seq, v, st->num);
+	else
+		get_tcp6_sock(seq, v, st->num);
 out:
 	return 0;
 }
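
Note: the tcp_ipv6.c changes are the IPv6 side of the listener rework: request sockets in SYN_RECV are now found directly by the socket lookup (the TCP_NEW_SYN_RECV branch added to tcp_v6_rcv()), tcp_v6_hnd_req() collapses into a cookie-only check, the queue_hash_add callback goes away, the window fields move to req->rsk_rcv_wnd / req->rsk_window_clamp, and the listener-side callbacks take a const struct sock * so they can run without the listener lock (note the updated comment on tcp_v6_do_rcv()). Two of the const-qualified signatures, copied from the hunks above:

static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl, struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req);

static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk,
					 struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst);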
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 0aba654..01bcb49 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -182,10 +182,12 @@
 		score++;
 	}
 
+	if (sk->sk_incoming_cpu == raw_smp_processor_id())
+		score++;
+
 	return score;
 }
 
-#define SCORE2_MAX (1 + 1 + 1)
 static inline int compute_score2(struct sock *sk, struct net *net,
 				 const struct in6_addr *saddr, __be16 sport,
 				 const struct in6_addr *daddr,
@@ -223,6 +225,9 @@
 		score++;
 	}
 
+	if (sk->sk_incoming_cpu == raw_smp_processor_id())
+		score++;
+
 	return score;
 }
 
@@ -251,8 +256,7 @@
 				hash = udp6_ehashfn(net, daddr, hnum,
 						    saddr, sport);
 				matches = 1;
-			} else if (score == SCORE2_MAX)
-				goto exact_match;
+			}
 		} else if (score == badness && reuseport) {
 			matches++;
 			if (reciprocal_scale(hash, matches) == 0)
@@ -269,7 +273,6 @@
 		goto begin;
 
 	if (result) {
-exact_match:
 		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 			result = NULL;
 		else if (unlikely(compute_score2(result, net, saddr, sport,
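
Note: both UDP socket-scoring helpers now add one point when the socket's recorded incoming CPU matches the CPU handling the packet; since the best possible score is no longer a fixed constant, the SCORE2_MAX exact-match shortcut is dropped. The added term, as it appears in both helpers above:

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;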
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 0c3e9ff..9db067a 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -131,6 +131,13 @@
 	return xfrm_output(sk, skb);
 }
 
+static int __xfrm6_output_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	struct xfrm_state *x = skb_dst(skb)->xfrm;
+
+	return x->outer_mode->afinfo->output_finish(sk, skb);
+}
+
 static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
@@ -140,7 +147,7 @@
 #ifdef CONFIG_NETFILTER
 	if (!x) {
 		IP6CB(skb)->flags |= IP6SKB_REROUTED;
-		return dst_output(sk, skb);
+		return dst_output(net, sk, skb);
 	}
 #endif
 
@@ -160,16 +167,14 @@
 	if (x->props.mode == XFRM_MODE_TUNNEL &&
 	    ((skb->len > mtu && !skb_is_gso(skb)) ||
 		dst_allfrag(skb_dst(skb)))) {
-		return ip6_fragment(sk, skb,
-				    x->outer_mode->afinfo->output_finish);
+		return ip6_fragment(net, sk, skb,
+				    __xfrm6_output_finish);
 	}
 	return x->outer_mode->afinfo->output_finish(sk, skb);
 }
 
-int xfrm6_output(struct sock *sk, struct sk_buff *skb)
+int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
-
 	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
 			    net, sk, skb,  NULL, skb_dst(skb)->dev,
 			    __xfrm6_output,
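
Note: xfrm6_output() now receives the namespace from its caller, and the small __xfrm6_output_finish() trampoline exists because ip6_fragment()'s finish callback follows the new (struct net *, struct sock *, struct sk_buff *) convention while afinfo->output_finish still takes (sk, skb). That three-argument shape is the same one dst_output() and the NF_HOOK okfns use after this series; a hypothetical callback, only to show the shape (example_output_cb is not in the patch):

#include <linux/skbuff.h>
#include <net/dst.h>

static int example_output_cb(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	/* hand the packet back to the normal output path */
	return dst_output(net, sk, skb);
}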
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 30caa28..08c9c93 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -20,7 +20,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 #if IS_ENABLED(CONFIG_IPV6_MIP6)
 #include <net/mip6.h>
 #endif
@@ -37,6 +37,7 @@
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_oif = oif;
+	fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
 	memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
 	if (saddr)
 		memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
@@ -132,10 +133,8 @@
 
 	nexthdr = nh[nhoff];
 
-	if (skb_dst(skb)) {
-		oif = vrf_master_ifindex(skb_dst(skb)->dev) ?
-			: skb_dst(skb)->dev->ifindex;
-	}
+	if (skb_dst(skb))
+		oif = l3mdev_fib_oif(skb_dst(skb)->dev);
 
 	memset(fl6, 0, sizeof(struct flowi6));
 	fl6->flowi6_mark = skb->mark;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 918151c..fcb2752 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -95,11 +95,10 @@
 /* Call Back functions */
 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
-static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
-static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
-				 u8 ipuser[16]);
-static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
-static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
+static void iucv_callback_connack(struct iucv_path *, u8 *);
+static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
+static void iucv_callback_connrej(struct iucv_path *, u8 *);
+static void iucv_callback_shutdown(struct iucv_path *, u8 *);
 
 static struct iucv_sock_list iucv_sk_list = {
 	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 2a6a1fd..7eaa000 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -713,7 +713,7 @@
  *
  * Sever an iucv path to free up the pathid. Used internally.
  */
-static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
+static int iucv_sever_pathid(u16 pathid, u8 *userdata)
 {
 	union iucv_param *parm;
 
@@ -876,7 +876,7 @@
  * Returns the result of the CP IUCV call.
  */
 int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
-		     u8 userdata[16], void *private)
+		     u8 *userdata, void *private)
 {
 	union iucv_param *parm;
 	int rc;
@@ -923,7 +923,7 @@
  * Returns the result of the CP IUCV call.
  */
 int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
-		      u8 userid[8], u8 system[8], u8 userdata[16],
+		      u8 *userid, u8 *system, u8 *userdata,
 		      void *private)
 {
 	union iucv_param *parm;
@@ -985,7 +985,7 @@
  *
  * Returns the result from the CP IUCV call.
  */
-int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
+int iucv_path_quiesce(struct iucv_path *path, u8 *userdata)
 {
 	union iucv_param *parm;
 	int rc;
@@ -1017,7 +1017,7 @@
  *
  * Returns the result from the CP IUCV call.
  */
-int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
+int iucv_path_resume(struct iucv_path *path, u8 *userdata)
 {
 	union iucv_param *parm;
 	int rc;
@@ -1047,7 +1047,7 @@
  *
  * Returns the result from the CP IUCV call.
  */
-int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
+int iucv_path_sever(struct iucv_path *path, u8 *userdata)
 {
 	int rc;
 
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index f6b090d..afca2eb 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1319,7 +1319,7 @@
 	tunnel = container_of(work, struct l2tp_tunnel, del_work);
 	sk = l2tp_tunnel_sock_lookup(tunnel);
 	if (!sk)
-		return;
+		goto out;
 
 	sock = sk->sk_socket;
 
@@ -1341,6 +1341,8 @@
 	}
 
 	l2tp_tunnel_sock_put(sk);
+out:
+	l2tp_tunnel_dec_refcount(tunnel);
 }
 
 /* Create a socket for the tunnel, if one isn't set up by
@@ -1636,8 +1638,13 @@
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
+	l2tp_tunnel_inc_refcount(tunnel);
 	l2tp_tunnel_closeall(tunnel);
-	return (false == queue_work(l2tp_wq, &tunnel->del_work));
+	if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
+		l2tp_tunnel_dec_refcount(tunnel);
+		return 1;
+	}
+	return 0;
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
 
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 68aa9ff..5871537 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -321,4 +321,7 @@
 #define l2tp_dbg(ptr, type, fmt, ...)					\
 	l2tp_printk(ptr, type, pr_debug, fmt, ##__VA_ARGS__)
 
+#define MODULE_ALIAS_L2TP_PWTYPE(type) \
+	MODULE_ALIAS("net-l2tp-type-" __stringify(type))
+
 #endif /* _L2TP_CORE_H_ */
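(Editor's note, not part of the patch: the MODULE_ALIAS_L2TP_PWTYPE() macro added above pairs with the request_module() call added to l2tp_netlink.c further down. Each pseudowire driver declares an alias keyed on its pseudowire type, so a session-create request for a type whose driver is not yet loaded can autoload it. A rough sketch of how the pieces fit together — the type numbers are the ones used in this series, and the expansion shown is simply the macro spelled out:

	/* in the pseudowire drivers */
	MODULE_ALIAS_L2TP_PWTYPE(5);	/* l2tp_eth: expands to MODULE_ALIAS("net-l2tp-type-5") */
	MODULE_ALIAS_L2TP_PWTYPE(11);	/* l2tp_ppp: expands to MODULE_ALIAS("net-l2tp-type-11") */

	/* in the netlink session-create path, before checking l2tp_nl_cmd_ops[] */
	request_module("net-l2tp-type-%u", cfg.pw_type);
)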
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 4b55287..e253c26 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -358,3 +358,4 @@
 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
 MODULE_DESCRIPTION("L2TP ethernet pseudowire driver");
 MODULE_VERSION("1.0");
+MODULE_ALIAS_L2TP_PWTYPE(5);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 7964993..ec22078 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -655,3 +655,4 @@
  * enums
  */
 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
+MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index d1ded37..aca38d8 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -801,3 +801,4 @@
  * enums
  */
 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP);
+MODULE_ALIAS_NET_PF_PROTO(PF_INET6, IPPROTO_L2TP);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 9e13c2f..f93c5be 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -576,6 +576,13 @@
 	if (info->attrs[L2TP_ATTR_MRU])
 		cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
 
+#ifdef CONFIG_MODULES
+	if (l2tp_nl_cmd_ops[cfg.pw_type] == NULL) {
+		genl_unlock();
+		request_module("net-l2tp-type-%u", cfg.pw_type);
+		genl_lock();
+	}
+#endif
 	if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
 	    (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
 		ret = -EPROTONOSUPPORT;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index f56c9f6..1ad18c5 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1863,3 +1863,4 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(PPPOL2TP_DRV_VERSION);
 MODULE_ALIAS("pppox-proto-" __stringify(PX_PROTO_OL2TP));
+MODULE_ALIAS_L2TP_PWTYPE(11);
diff --git a/net/l3mdev/Kconfig b/net/l3mdev/Kconfig
new file mode 100644
index 0000000..5d47325
--- /dev/null
+++ b/net/l3mdev/Kconfig
@@ -0,0 +1,10 @@
+#
+# Configuration for L3 master device support
+#
+
+config NET_L3_MASTER_DEV
+	bool "L3 Master device support"
+	depends on INET || IPV6
+	---help---
+	  This module provides glue between core networking code and device
+	  drivers to support L3 master devices like VRF.
diff --git a/net/l3mdev/Makefile b/net/l3mdev/Makefile
new file mode 100644
index 0000000..84a53a6
--- /dev/null
+++ b/net/l3mdev/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the L3 device API
+#
+
+obj-$(CONFIG_NET_L3_MASTER_DEV) += l3mdev.o
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
new file mode 100644
index 0000000..8e5ead3
--- /dev/null
+++ b/net/l3mdev/l3mdev.c
@@ -0,0 +1,92 @@
+/*
+ * net/l3mdev/l3mdev.c - L3 master device implementation
+ * Copyright (c) 2015 Cumulus Networks
+ * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/netdevice.h>
+#include <net/l3mdev.h>
+
+/**
+ *	l3mdev_master_ifindex_rcu - get index of L3 master device
+ *	@dev: targeted interface
+ */
+
+int l3mdev_master_ifindex_rcu(struct net_device *dev)
+{
+	int ifindex = 0;
+
+	if (!dev)
+		return 0;
+
+	if (netif_is_l3_master(dev)) {
+		ifindex = dev->ifindex;
+	} else if (netif_is_l3_slave(dev)) {
+		struct net_device *master;
+
+		master = netdev_master_upper_dev_get_rcu(dev);
+		if (master)
+			ifindex = master->ifindex;
+	}
+
+	return ifindex;
+}
+EXPORT_SYMBOL_GPL(l3mdev_master_ifindex_rcu);
+
+/**
+ *	l3mdev_fib_table_rcu - get FIB table id associated with an L3
+ *	master interface
+ *	@dev: targeted interface
+ */
+
+u32 l3mdev_fib_table_rcu(const struct net_device *dev)
+{
+	u32 tb_id = 0;
+
+	if (!dev)
+		return 0;
+
+	if (netif_is_l3_master(dev)) {
+		if (dev->l3mdev_ops->l3mdev_fib_table)
+			tb_id = dev->l3mdev_ops->l3mdev_fib_table(dev);
+	} else if (netif_is_l3_slave(dev)) {
+		/* Users of netdev_master_upper_dev_get_rcu need non-const,
+		 * but current inet_*type functions take a const
+		 */
+		struct net_device *_dev = (struct net_device *) dev;
+		const struct net_device *master;
+
+		master = netdev_master_upper_dev_get_rcu(_dev);
+		if (master &&
+		    master->l3mdev_ops->l3mdev_fib_table)
+			tb_id = master->l3mdev_ops->l3mdev_fib_table(master);
+	}
+
+	return tb_id;
+}
+EXPORT_SYMBOL_GPL(l3mdev_fib_table_rcu);
+
+u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
+{
+	struct net_device *dev;
+	u32 tb_id = 0;
+
+	if (!ifindex)
+		return 0;
+
+	rcu_read_lock();
+
+	dev = dev_get_by_index_rcu(net, ifindex);
+	if (dev)
+		tb_id = l3mdev_fib_table_rcu(dev);
+
+	rcu_read_unlock();
+
+	return tb_id;
+}
+EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index);
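(Editor's note, not part of the patch: the l3mdev core above is only glue; the FIB table association is delegated to the master device driver through dev->l3mdev_ops. A minimal sketch of how a VRF-style driver might hook in, with the callback signature inferred from the calls above — the ops struct itself is declared in the new net/l3mdev.h header (not shown here), and the table id and function names below are illustrative only:

	static u32 myvrf_fib_table(const struct net_device *dev)
	{
		/* hand back the FIB table this master device routes through */
		return 100;				/* hypothetical table id */
	}

	static const struct l3mdev_ops myvrf_l3mdev_ops = {
		.l3mdev_fib_table	= myvrf_fib_table,
	};

	static void myvrf_setup(struct net_device *dev)
	{
		/* the device must also be flagged as an L3 master so that
		 * netif_is_l3_master() matches; that flag is not shown here
		 */
		dev->l3mdev_ops = &myvrf_l3mdev_ops;	/* consulted by l3mdev_fib_table_rcu() */
	}
)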
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 5c564a6..10ad4ac 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -79,7 +79,7 @@
 	       (int)reason);
 
 	if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
-			     &sta->sta, tid, NULL, 0))
+			     &sta->sta, tid, NULL, 0, false))
 		sdata_info(sta->sdata,
 			   "HW problem - can not stop rx aggregation for %pM tid %d\n",
 			   sta->sta.addr, tid);
@@ -189,6 +189,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *mgmt;
+	bool amsdu = ieee80211_hw_check(&local->hw, SUPPORTS_AMSDU_IN_AMPDU);
 	u16 capab;
 
 	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
@@ -217,7 +218,8 @@
 	mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP;
 	mgmt->u.action.u.addba_resp.dialog_token = dialog_token;
 
-	capab = (u16)(policy << 1);	/* bit 1 aggregation policy */
+	capab = (u16)(amsdu << 0);	/* bit 0 A-MSDU support */
+	capab |= (u16)(policy << 1);	/* bit 1 aggregation policy */
 	capab |= (u16)(tid << 2); 	/* bit 5:2 TID number */
 	capab |= (u16)(buf_size << 6);	/* bit 15:6 max size of aggregation */
 
@@ -321,7 +323,7 @@
 		__skb_queue_head_init(&tid_agg_rx->reorder_buf[i]);
 
 	ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
-			       &sta->sta, tid, &start_seq_num, 0);
+			       &sta->sta, tid, &start_seq_num, 0, false);
 	ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n",
 	       sta->sta.addr, tid, ret);
 	if (ret) {
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c8ba2e7..a758eb84 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -97,7 +97,8 @@
 	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
 
 	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
-	capab = (u16)(1 << 1);		/* bit 1 aggregation policy */
+	capab = (u16)(1 << 0);		/* bit 0 A-MSDU support */
+	capab |= (u16)(1 << 1);		/* bit 1 aggregation policy */
 	capab |= (u16)(tid << 2); 	/* bit 5:2 TID number */
 	capab |= (u16)(agg_size << 6);	/* bit 15:6 max size of aggregation */
 
@@ -331,7 +332,7 @@
 			return -EALREADY;
 		ret = drv_ampdu_action(local, sta->sdata,
 				       IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
-				       &sta->sta, tid, NULL, 0);
+				       &sta->sta, tid, NULL, 0, false);
 		WARN_ON_ONCE(ret);
 		return 0;
 	}
@@ -381,7 +382,7 @@
 	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;
 
 	ret = drv_ampdu_action(local, sta->sdata, action,
-			       &sta->sta, tid, NULL, 0);
+			       &sta->sta, tid, NULL, 0, false);
 
 	/* HW shall not deny going back to legacy */
 	if (WARN_ON(ret)) {
@@ -469,7 +470,7 @@
 	start_seq_num = sta->tid_seq[tid] >> 4;
 
 	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
-			       &sta->sta, tid, &start_seq_num, 0);
+			       &sta->sta, tid, &start_seq_num, 0, false);
 	if (ret) {
 		ht_dbg(sdata,
 		       "BA request denied - HW unavailable for %pM tid %d\n",
@@ -693,7 +694,8 @@
 
 	drv_ampdu_action(local, sta->sdata,
 			 IEEE80211_AMPDU_TX_OPERATIONAL,
-			 &sta->sta, tid, NULL, tid_tx->buf_size);
+			 &sta->sta, tid, NULL, tid_tx->buf_size,
+			 tid_tx->amsdu);
 
 	/*
 	 * synchronize with TX path, while splicing the TX path
@@ -918,8 +920,10 @@
 	struct tid_ampdu_tx *tid_tx;
 	u16 capab, tid;
 	u8 buf_size;
+	bool amsdu;
 
 	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
+	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
 	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
 	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
 
@@ -968,6 +972,7 @@
 		}
 
 		tid_tx->buf_size = buf_size;
+		tid_tx->amsdu = amsdu;
 
 		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
 			ieee80211_agg_tx_operational(local, sta, tid);
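(Editor's note, not part of the patch: with SUPPORTS_AMSDU_IN_AMPDU, bit 0 of the ADDBA capability field now advertises A-MSDU support alongside the existing policy/TID/buffer-size fields. A small sketch of the packing and of how the responder recovers the flag — the mask name is the one used above, the values are illustrative:

	u16 tid = 3, capab;
	u8 buf_size = 64;
	bool amsdu = true;			/* hw advertises SUPPORTS_AMSDU_IN_AMPDU */

	capab  = (u16)(amsdu << 0);		/* bit 0     A-MSDU support */
	capab |= (u16)(1 << 1);			/* bit 1     aggregation policy */
	capab |= (u16)(tid << 2);		/* bits 5:2  TID */
	capab |= (u16)(buf_size << 6);		/* bits 15:6 buffer size */

	/* peer side, when parsing the ADDBA frame */
	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
)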
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 17b1fe9..68e551e 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -981,7 +981,7 @@
 		 * well. Some drivers require rate control initialized
 		 * before drv_sta_state() is called.
 		 */
-		if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+		if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
 			rate_control_rate_init(sta);
 
 		ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
@@ -1120,8 +1120,11 @@
 	    local->hw.queues >= IEEE80211_NUM_ACS)
 		sta->sta.wme = set & BIT(NL80211_STA_FLAG_WME);
 
-	/* auth flags will be set later for TDLS stations */
-	if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+	/* auth flags will be set later for TDLS,
+	 * and for unassociated stations that move to associated */
+	if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+	    !((mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) &&
+	      (set & BIT(NL80211_STA_FLAG_ASSOCIATED)))) {
 		ret = sta_apply_auth_flags(local, sta, mask, set);
 		if (ret)
 			return ret;
@@ -1156,6 +1159,7 @@
 		set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH);
 
 	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+	    !sdata->u.mgd.tdls_wider_bw_prohibited &&
 	    ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
 	    params->ext_capab_len >= 8 &&
 	    params->ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED)
@@ -1212,7 +1216,8 @@
 		sta_apply_mesh_params(local, sta, params);
 
 	/* set the STA state after all sta info from usermode has been set */
-	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) ||
+	    set & BIT(NL80211_STA_FLAG_ASSOCIATED)) {
 		ret = sta_apply_auth_flags(local, sta, mask, set);
 		if (ret)
 			return ret;
@@ -1254,12 +1259,14 @@
 	 * defaults -- if userspace wants something else we'll
 	 * change it accordingly in sta_apply_parameters()
 	 */
-	if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) {
+	if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+	    !(params->sta_flags_set & (BIT(NL80211_STA_FLAG_AUTHENTICATED) |
+					BIT(NL80211_STA_FLAG_ASSOCIATED)))) {
 		sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
 		sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
-	} else {
-		sta->sta.tdls = true;
 	}
+	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
+		sta->sta.tdls = true;
 
 	err = sta_apply_parameters(local, sta, params);
 	if (err) {
@@ -1268,10 +1275,12 @@
 	}
 
 	/*
-	 * for TDLS, rate control should be initialized only when
-	 * rates are known and station is marked authorized
+	 * for TDLS and for unassociated stations, rate control should be
+	 * initialized only when rates are known and station is marked
+	 * authorized/associated
 	 */
-	if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+	if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+	    test_sta_flag(sta, WLAN_STA_ASSOC))
 		rate_control_rate_init(sta);
 
 	layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
@@ -1346,7 +1355,10 @@
 		break;
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
-		statype = CFG80211_STA_AP_CLIENT;
+		if (test_sta_flag(sta, WLAN_STA_ASSOC))
+			statype = CFG80211_STA_AP_CLIENT;
+		else
+			statype = CFG80211_STA_AP_CLIENT_UNASSOC;
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -2474,6 +2486,7 @@
 
 	bss_conf->cqm_rssi_thold = rssi_thold;
 	bss_conf->cqm_rssi_hyst = rssi_hyst;
+	sdata->u.mgd.last_cqm_event_signal = 0;
 
 	/* tell the driver upon association, unless already associated */
 	if (sdata->u.mgd.associated &&
@@ -2518,15 +2531,17 @@
 			continue;
 
 		for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
-			if (~sdata->rc_rateidx_mcs_mask[i][j])
+			if (~sdata->rc_rateidx_mcs_mask[i][j]) {
 				sdata->rc_has_mcs_mask[i] = true;
-
-			if (~sdata->rc_rateidx_vht_mcs_mask[i][j])
-				sdata->rc_has_vht_mcs_mask[i] = true;
-
-			if (sdata->rc_has_mcs_mask[i] &&
-			    sdata->rc_has_vht_mcs_mask[i])
 				break;
+			}
+		}
+
+		for (j = 0; j < NL80211_VHT_NSS_MAX; j++) {
+			if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) {
+				sdata->rc_has_vht_mcs_mask[i] = true;
+				break;
+			}
 		}
 	}
 
@@ -3519,18 +3534,32 @@
 					  u16 frame_type, bool reg)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
+	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 
 	switch (frame_type) {
 	case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ:
-		if (reg)
+		if (reg) {
 			local->probe_req_reg++;
-		else
-			local->probe_req_reg--;
+			sdata->vif.probe_req_reg++;
+		} else {
+			if (local->probe_req_reg)
+				local->probe_req_reg--;
+
+			if (sdata->vif.probe_req_reg)
+				sdata->vif.probe_req_reg--;
+		}
 
 		if (!local->open_count)
 			break;
 
-		ieee80211_queue_work(&local->hw, &local->reconfig_filter);
+		if (sdata->vif.probe_req_reg == 1)
+			drv_config_iface_filter(local, sdata, FIF_PROBE_REQ,
+						FIF_PROBE_REQ);
+		else if (sdata->vif.probe_req_reg == 0)
+			drv_config_iface_filter(local, sdata, 0,
+						FIF_PROBE_REQ);
+
+		ieee80211_configure_filter(local);
 		break;
 	default:
 		break;
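(Editor's note, not part of the patch: probe request registrations are now counted per interface as well as globally, and a driver that implements the new config_iface_filter op is told when the first registration for a vif arrives and when the last one goes away. A hedged sketch of such a driver callback — the signature follows the drv_config_iface_filter() call added in driver-ops.h below, the body is illustrative:

	static void mydrv_config_iface_filter(struct ieee80211_hw *hw,
					      struct ieee80211_vif *vif,
					      unsigned int filter_flags,
					      unsigned int changed_flags)
	{
		if (!(changed_flags & FIF_PROBE_REQ))
			return;

		if (filter_flags & FIF_PROBE_REQ)
			pr_debug("%s: enable probe request capture\n", __func__);
		else
			pr_debug("%s: disable probe request capture\n", __func__);
	}
)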
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index ced6bf3..3636b45 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -123,6 +123,8 @@
 	FLAG(SUPPORTS_CLONED_SKBS),
 	FLAG(SINGLE_SCAN_ON_ALL_BANDS),
 	FLAG(TDLS_WIDER_BW),
+	FLAG(SUPPORTS_AMSDU_IN_AMPDU),
+	FLAG(BEACON_TX_STATUS),
 
 	/* keep last for the build bug below */
 	(void *)0x1
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 702ca12..7961e7d0 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -2,6 +2,7 @@
  * Copyright 2003-2005	Devicescape Software, Inc.
  * Copyright (c) 2006	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2015	Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -34,6 +35,14 @@
 	.llseek = generic_file_llseek,					\
 }
 
+#define KEY_OPS_W(name)							\
+static const struct file_operations key_ ##name## _ops = {		\
+	.read = key_##name##_read,					\
+	.write = key_##name##_write,					\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+}
+
 #define KEY_FILE(name, format)						\
 		 KEY_READ_##format(name)				\
 		 KEY_OPS(name)
@@ -74,6 +83,41 @@
 }
 KEY_OPS(algorithm);
 
+static ssize_t key_tx_spec_write(struct file *file, const char __user *userbuf,
+				 size_t count, loff_t *ppos)
+{
+	struct ieee80211_key *key = file->private_data;
+	u64 pn;
+	int ret;
+
+	switch (key->conf.cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		return -EINVAL;
+	case WLAN_CIPHER_SUITE_TKIP:
+		/* not supported yet */
+		return -EOPNOTSUPP;
+	case WLAN_CIPHER_SUITE_CCMP:
+	case WLAN_CIPHER_SUITE_CCMP_256:
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+	case WLAN_CIPHER_SUITE_GCMP:
+	case WLAN_CIPHER_SUITE_GCMP_256:
+		ret = kstrtou64_from_user(userbuf, count, 16, &pn);
+		if (ret)
+			return ret;
+		/* PN is a 48-bit counter */
+		if (pn >= (1ULL << 48))
+			return -ERANGE;
+		atomic64_set(&key->conf.tx_pn, pn);
+		return count;
+	default:
+		return 0;
+	}
+}
+
 static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
 				size_t count, loff_t *ppos)
 {
@@ -110,7 +154,7 @@
 	}
 	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
 }
-KEY_OPS(tx_spec);
+KEY_OPS_W(tx_spec);
 
 static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
 				size_t count, loff_t *ppos)
@@ -278,6 +322,9 @@
 #define DEBUGFS_ADD(name) \
 	debugfs_create_file(#name, 0400, key->debugfs.dir, \
 			    key, &key_##name##_ops);
+#define DEBUGFS_ADD_W(name) \
+	debugfs_create_file(#name, 0600, key->debugfs.dir, \
+			    key, &key_##name##_ops);
 
 void ieee80211_debugfs_key_add(struct ieee80211_key *key)
 {
@@ -310,7 +357,7 @@
 	DEBUGFS_ADD(keyidx);
 	DEBUGFS_ADD(hw_key_idx);
 	DEBUGFS_ADD(algorithm);
-	DEBUGFS_ADD(tx_spec);
+	DEBUGFS_ADD_W(tx_spec);
 	DEBUGFS_ADD(rx_spec);
 	DEBUGFS_ADD(replays);
 	DEBUGFS_ADD(icverrors);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 1021e87..37ea30e 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -114,14 +114,6 @@
 	return scnprintf(buf, buflen, "%pM\n", sdata->field);		\
 }
 
-#define IEEE80211_IF_FMT_DEC_DIV_16(name, field)			\
-static ssize_t ieee80211_if_fmt_##name(					\
-	const struct ieee80211_sub_if_data *sdata,			\
-	char *buf, int buflen)						\
-{									\
-	return scnprintf(buf, buflen, "%d\n", sdata->field / 16);	\
-}
-
 #define IEEE80211_IF_FMT_JIFFIES_TO_MS(name, field)			\
 static ssize_t ieee80211_if_fmt_##name(					\
 	const struct ieee80211_sub_if_data *sdata,			\
@@ -247,8 +239,6 @@
 /* STA attributes */
 IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
 IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
-IEEE80211_IF_FILE(last_beacon, u.mgd.last_beacon_signal, DEC);
-IEEE80211_IF_FILE(ave_beacon, u.mgd.ave_beacon_signal, DEC_DIV_16);
 IEEE80211_IF_FILE(beacon_timeout, u.mgd.beacon_timeout, JIFFIES_TO_MS);
 
 static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
@@ -455,6 +445,34 @@
 }
 IEEE80211_IF_FILE_RW(uapsd_max_sp_len);
 
+static ssize_t ieee80211_if_fmt_tdls_wider_bw(
+	const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+	const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+	bool tdls_wider_bw;
+
+	tdls_wider_bw = ieee80211_hw_check(&sdata->local->hw, TDLS_WIDER_BW) &&
+			!ifmgd->tdls_wider_bw_prohibited;
+
+	return snprintf(buf, buflen, "%d\n", tdls_wider_bw);
+}
+
+static ssize_t ieee80211_if_parse_tdls_wider_bw(
+	struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
+{
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+	u8 val;
+	int ret;
+
+	ret = kstrtou8(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	ifmgd->tdls_wider_bw_prohibited = !val;
+	return buflen;
+}
+IEEE80211_IF_FILE_RW(tdls_wider_bw);
+
 /* AP attributes */
 IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC);
 IEEE80211_IF_FILE(num_sta_ps, u.ap.ps.num_sta_ps, ATOMIC);
@@ -606,14 +624,13 @@
 {
 	DEBUGFS_ADD(bssid);
 	DEBUGFS_ADD(aid);
-	DEBUGFS_ADD(last_beacon);
-	DEBUGFS_ADD(ave_beacon);
 	DEBUGFS_ADD(beacon_timeout);
 	DEBUGFS_ADD_MODE(smps, 0600);
 	DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
 	DEBUGFS_ADD_MODE(beacon_loss, 0200);
 	DEBUGFS_ADD_MODE(uapsd_queues, 0600);
 	DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600);
+	DEBUGFS_ADD_MODE(tdls_wider_bw, 0600);
 }
 
 static void add_ap_files(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index 267c3b1..a1d5431 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -8,6 +8,60 @@
 #include "trace.h"
 #include "driver-ops.h"
 
+int drv_add_interface(struct ieee80211_local *local,
+		      struct ieee80211_sub_if_data *sdata)
+{
+	int ret;
+
+	might_sleep();
+
+	if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+		    (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+		     !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
+		     !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))))
+		return -EINVAL;
+
+	trace_drv_add_interface(local, sdata);
+	ret = local->ops->add_interface(&local->hw, &sdata->vif);
+	trace_drv_return_int(local, ret);
+
+	if (ret == 0)
+		sdata->flags |= IEEE80211_SDATA_IN_DRIVER;
+
+	return ret;
+}
+
+int drv_change_interface(struct ieee80211_local *local,
+			 struct ieee80211_sub_if_data *sdata,
+			 enum nl80211_iftype type, bool p2p)
+{
+	int ret;
+
+	might_sleep();
+
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
+
+	trace_drv_change_interface(local, sdata, type, p2p);
+	ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
+	trace_drv_return_int(local, ret);
+	return ret;
+}
+
+void drv_remove_interface(struct ieee80211_local *local,
+			  struct ieee80211_sub_if_data *sdata)
+{
+	might_sleep();
+
+	if (!check_sdata_in_driver(sdata))
+		return;
+
+	trace_drv_remove_interface(local, sdata);
+	local->ops->remove_interface(&local->hw, &sdata->vif);
+	sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
+	trace_drv_return_void(local);
+}
+
 __must_check
 int drv_sta_state(struct ieee80211_local *local,
 		  struct ieee80211_sub_if_data *sdata,
@@ -39,3 +93,171 @@
 	trace_drv_return_int(local, ret);
 	return ret;
 }
+
+void drv_sta_rc_update(struct ieee80211_local *local,
+		       struct ieee80211_sub_if_data *sdata,
+		       struct ieee80211_sta *sta, u32 changed)
+{
+	sdata = get_bss_sdata(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
+
+	WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
+		(sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+		 sdata->vif.type != NL80211_IFTYPE_MESH_POINT));
+
+	trace_drv_sta_rc_update(local, sdata, sta, changed);
+	if (local->ops->sta_rc_update)
+		local->ops->sta_rc_update(&local->hw, &sdata->vif,
+					  sta, changed);
+
+	trace_drv_return_void(local);
+}
+
+int drv_conf_tx(struct ieee80211_local *local,
+		struct ieee80211_sub_if_data *sdata, u16 ac,
+		const struct ieee80211_tx_queue_params *params)
+{
+	int ret = -EOPNOTSUPP;
+
+	might_sleep();
+
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
+
+	if (WARN_ONCE(params->cw_min == 0 ||
+		      params->cw_min > params->cw_max,
+		      "%s: invalid CW_min/CW_max: %d/%d\n",
+		      sdata->name, params->cw_min, params->cw_max))
+		return -EINVAL;
+
+	trace_drv_conf_tx(local, sdata, ac, params);
+	if (local->ops->conf_tx)
+		ret = local->ops->conf_tx(&local->hw, &sdata->vif,
+					  ac, params);
+	trace_drv_return_int(local, ret);
+	return ret;
+}
+
+u64 drv_get_tsf(struct ieee80211_local *local,
+		struct ieee80211_sub_if_data *sdata)
+{
+	u64 ret = -1ULL;
+
+	might_sleep();
+
+	if (!check_sdata_in_driver(sdata))
+		return ret;
+
+	trace_drv_get_tsf(local, sdata);
+	if (local->ops->get_tsf)
+		ret = local->ops->get_tsf(&local->hw, &sdata->vif);
+	trace_drv_return_u64(local, ret);
+	return ret;
+}
+
+void drv_set_tsf(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 u64 tsf)
+{
+	might_sleep();
+
+	if (!check_sdata_in_driver(sdata))
+		return;
+
+	trace_drv_set_tsf(local, sdata, tsf);
+	if (local->ops->set_tsf)
+		local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
+	trace_drv_return_void(local);
+}
+
+void drv_reset_tsf(struct ieee80211_local *local,
+		   struct ieee80211_sub_if_data *sdata)
+{
+	might_sleep();
+
+	if (!check_sdata_in_driver(sdata))
+		return;
+
+	trace_drv_reset_tsf(local, sdata);
+	if (local->ops->reset_tsf)
+		local->ops->reset_tsf(&local->hw, &sdata->vif);
+	trace_drv_return_void(local);
+}
+
+int drv_switch_vif_chanctx(struct ieee80211_local *local,
+			   struct ieee80211_vif_chanctx_switch *vifs,
+			   int n_vifs, enum ieee80211_chanctx_switch_mode mode)
+{
+	int ret = 0;
+	int i;
+
+	if (!local->ops->switch_vif_chanctx)
+		return -EOPNOTSUPP;
+
+	for (i = 0; i < n_vifs; i++) {
+		struct ieee80211_chanctx *new_ctx =
+			container_of(vifs[i].new_ctx,
+				     struct ieee80211_chanctx,
+				     conf);
+		struct ieee80211_chanctx *old_ctx =
+			container_of(vifs[i].old_ctx,
+				     struct ieee80211_chanctx,
+				     conf);
+
+		WARN_ON_ONCE(!old_ctx->driver_present);
+		WARN_ON_ONCE((mode == CHANCTX_SWMODE_SWAP_CONTEXTS &&
+			      new_ctx->driver_present) ||
+			     (mode == CHANCTX_SWMODE_REASSIGN_VIF &&
+			      !new_ctx->driver_present));
+	}
+
+	trace_drv_switch_vif_chanctx(local, vifs, n_vifs, mode);
+	ret = local->ops->switch_vif_chanctx(&local->hw,
+					     vifs, n_vifs, mode);
+	trace_drv_return_int(local, ret);
+
+	if (!ret && mode == CHANCTX_SWMODE_SWAP_CONTEXTS) {
+		for (i = 0; i < n_vifs; i++) {
+			struct ieee80211_chanctx *new_ctx =
+				container_of(vifs[i].new_ctx,
+					     struct ieee80211_chanctx,
+					     conf);
+			struct ieee80211_chanctx *old_ctx =
+				container_of(vifs[i].old_ctx,
+					     struct ieee80211_chanctx,
+					     conf);
+
+			new_ctx->driver_present = true;
+			old_ctx->driver_present = false;
+		}
+	}
+
+	return ret;
+}
+
+int drv_ampdu_action(struct ieee80211_local *local,
+		     struct ieee80211_sub_if_data *sdata,
+		     enum ieee80211_ampdu_mlme_action action,
+		     struct ieee80211_sta *sta, u16 tid,
+		     u16 *ssn, u8 buf_size, bool amsdu)
+{
+	int ret = -EOPNOTSUPP;
+
+	might_sleep();
+
+	sdata = get_bss_sdata(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
+
+	trace_drv_ampdu_action(local, sdata, action, sta, tid,
+			       ssn, buf_size, amsdu);
+
+	if (local->ops->ampdu_action)
+		ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
+					       sta, tid, ssn, buf_size, amsdu);
+
+	trace_drv_return_int(local, ret);
+
+	return ret;
+}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 02d9133..3098709 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -137,59 +137,15 @@
 }
 #endif
 
-static inline int drv_add_interface(struct ieee80211_local *local,
-				    struct ieee80211_sub_if_data *sdata)
-{
-	int ret;
+int drv_add_interface(struct ieee80211_local *local,
+		      struct ieee80211_sub_if_data *sdata);
 
-	might_sleep();
+int drv_change_interface(struct ieee80211_local *local,
+			 struct ieee80211_sub_if_data *sdata,
+			 enum nl80211_iftype type, bool p2p);
 
-	if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
-		    (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
-		     !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
-		     !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))))
-		return -EINVAL;
-
-	trace_drv_add_interface(local, sdata);
-	ret = local->ops->add_interface(&local->hw, &sdata->vif);
-	trace_drv_return_int(local, ret);
-
-	if (ret == 0)
-		sdata->flags |= IEEE80211_SDATA_IN_DRIVER;
-
-	return ret;
-}
-
-static inline int drv_change_interface(struct ieee80211_local *local,
-				       struct ieee80211_sub_if_data *sdata,
-				       enum nl80211_iftype type, bool p2p)
-{
-	int ret;
-
-	might_sleep();
-
-	if (!check_sdata_in_driver(sdata))
-		return -EIO;
-
-	trace_drv_change_interface(local, sdata, type, p2p);
-	ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
-	trace_drv_return_int(local, ret);
-	return ret;
-}
-
-static inline void drv_remove_interface(struct ieee80211_local *local,
-					struct ieee80211_sub_if_data *sdata)
-{
-	might_sleep();
-
-	if (!check_sdata_in_driver(sdata))
-		return;
-
-	trace_drv_remove_interface(local, sdata);
-	local->ops->remove_interface(&local->hw, &sdata->vif);
-	sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
-	trace_drv_return_void(local);
-}
+void drv_remove_interface(struct ieee80211_local *local,
+			  struct ieee80211_sub_if_data *sdata);
 
 static inline int drv_config(struct ieee80211_local *local, u32 changed)
 {
@@ -260,6 +216,22 @@
 	trace_drv_return_void(local);
 }
 
+static inline void drv_config_iface_filter(struct ieee80211_local *local,
+					   struct ieee80211_sub_if_data *sdata,
+					   unsigned int filter_flags,
+					   unsigned int changed_flags)
+{
+	might_sleep();
+
+	trace_drv_config_iface_filter(local, sdata, filter_flags,
+				      changed_flags);
+	if (local->ops->config_iface_filter)
+		local->ops->config_iface_filter(&local->hw, &sdata->vif,
+						filter_flags,
+						changed_flags);
+	trace_drv_return_void(local);
+}
+
 static inline int drv_set_tim(struct ieee80211_local *local,
 			      struct ieee80211_sta *sta, bool set)
 {
@@ -580,25 +552,9 @@
 		  enum ieee80211_sta_state old_state,
 		  enum ieee80211_sta_state new_state);
 
-static inline void drv_sta_rc_update(struct ieee80211_local *local,
-				     struct ieee80211_sub_if_data *sdata,
-				     struct ieee80211_sta *sta, u32 changed)
-{
-	sdata = get_bss_sdata(sdata);
-	if (!check_sdata_in_driver(sdata))
-		return;
-
-	WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
-		(sdata->vif.type != NL80211_IFTYPE_ADHOC &&
-		 sdata->vif.type != NL80211_IFTYPE_MESH_POINT));
-
-	trace_drv_sta_rc_update(local, sdata, sta, changed);
-	if (local->ops->sta_rc_update)
-		local->ops->sta_rc_update(&local->hw, &sdata->vif,
-					  sta, changed);
-
-	trace_drv_return_void(local);
-}
+void drv_sta_rc_update(struct ieee80211_local *local,
+		       struct ieee80211_sub_if_data *sdata,
+		       struct ieee80211_sta *sta, u32 changed);
 
 static inline void drv_sta_rate_tbl_update(struct ieee80211_local *local,
 					   struct ieee80211_sub_if_data *sdata,
@@ -630,76 +586,17 @@
 	trace_drv_return_void(local);
 }
 
-static inline int drv_conf_tx(struct ieee80211_local *local,
-			      struct ieee80211_sub_if_data *sdata, u16 ac,
-			      const struct ieee80211_tx_queue_params *params)
-{
-	int ret = -EOPNOTSUPP;
+int drv_conf_tx(struct ieee80211_local *local,
+		struct ieee80211_sub_if_data *sdata, u16 ac,
+		const struct ieee80211_tx_queue_params *params);
 
-	might_sleep();
-
-	if (!check_sdata_in_driver(sdata))
-		return -EIO;
-
-	if (WARN_ONCE(params->cw_min == 0 ||
-		      params->cw_min > params->cw_max,
-		      "%s: invalid CW_min/CW_max: %d/%d\n",
-		      sdata->name, params->cw_min, params->cw_max))
-		return -EINVAL;
-
-	trace_drv_conf_tx(local, sdata, ac, params);
-	if (local->ops->conf_tx)
-		ret = local->ops->conf_tx(&local->hw, &sdata->vif,
-					  ac, params);
-	trace_drv_return_int(local, ret);
-	return ret;
-}
-
-static inline u64 drv_get_tsf(struct ieee80211_local *local,
-			      struct ieee80211_sub_if_data *sdata)
-{
-	u64 ret = -1ULL;
-
-	might_sleep();
-
-	if (!check_sdata_in_driver(sdata))
-		return ret;
-
-	trace_drv_get_tsf(local, sdata);
-	if (local->ops->get_tsf)
-		ret = local->ops->get_tsf(&local->hw, &sdata->vif);
-	trace_drv_return_u64(local, ret);
-	return ret;
-}
-
-static inline void drv_set_tsf(struct ieee80211_local *local,
-			       struct ieee80211_sub_if_data *sdata,
-			       u64 tsf)
-{
-	might_sleep();
-
-	if (!check_sdata_in_driver(sdata))
-		return;
-
-	trace_drv_set_tsf(local, sdata, tsf);
-	if (local->ops->set_tsf)
-		local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
-	trace_drv_return_void(local);
-}
-
-static inline void drv_reset_tsf(struct ieee80211_local *local,
-				 struct ieee80211_sub_if_data *sdata)
-{
-	might_sleep();
-
-	if (!check_sdata_in_driver(sdata))
-		return;
-
-	trace_drv_reset_tsf(local, sdata);
-	if (local->ops->reset_tsf)
-		local->ops->reset_tsf(&local->hw, &sdata->vif);
-	trace_drv_return_void(local);
-}
+u64 drv_get_tsf(struct ieee80211_local *local,
+		struct ieee80211_sub_if_data *sdata);
+void drv_set_tsf(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 u64 tsf);
+void drv_reset_tsf(struct ieee80211_local *local,
+		   struct ieee80211_sub_if_data *sdata);
 
 static inline int drv_tx_last_beacon(struct ieee80211_local *local)
 {
@@ -714,30 +611,11 @@
 	return ret;
 }
 
-static inline int drv_ampdu_action(struct ieee80211_local *local,
-				   struct ieee80211_sub_if_data *sdata,
-				   enum ieee80211_ampdu_mlme_action action,
-				   struct ieee80211_sta *sta, u16 tid,
-				   u16 *ssn, u8 buf_size)
-{
-	int ret = -EOPNOTSUPP;
-
-	might_sleep();
-
-	sdata = get_bss_sdata(sdata);
-	if (!check_sdata_in_driver(sdata))
-		return -EIO;
-
-	trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
-
-	if (local->ops->ampdu_action)
-		ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
-					       sta, tid, ssn, buf_size);
-
-	trace_drv_return_int(local, ret);
-
-	return ret;
-}
+int drv_ampdu_action(struct ieee80211_local *local,
+		     struct ieee80211_sub_if_data *sdata,
+		     enum ieee80211_ampdu_mlme_action action,
+		     struct ieee80211_sta *sta, u16 tid,
+		     u16 *ssn, u8 buf_size, bool amsdu);
 
 static inline int drv_get_survey(struct ieee80211_local *local, int idx,
 				struct survey_info *survey)
@@ -1066,58 +944,9 @@
 	trace_drv_return_void(local);
 }
 
-static inline int
-drv_switch_vif_chanctx(struct ieee80211_local *local,
-		       struct ieee80211_vif_chanctx_switch *vifs,
-		       int n_vifs,
-		       enum ieee80211_chanctx_switch_mode mode)
-{
-	int ret = 0;
-	int i;
-
-	if (!local->ops->switch_vif_chanctx)
-		return -EOPNOTSUPP;
-
-	for (i = 0; i < n_vifs; i++) {
-		struct ieee80211_chanctx *new_ctx =
-			container_of(vifs[i].new_ctx,
-				     struct ieee80211_chanctx,
-				     conf);
-		struct ieee80211_chanctx *old_ctx =
-			container_of(vifs[i].old_ctx,
-				     struct ieee80211_chanctx,
-				     conf);
-
-		WARN_ON_ONCE(!old_ctx->driver_present);
-		WARN_ON_ONCE((mode == CHANCTX_SWMODE_SWAP_CONTEXTS &&
-			      new_ctx->driver_present) ||
-			     (mode == CHANCTX_SWMODE_REASSIGN_VIF &&
-			      !new_ctx->driver_present));
-	}
-
-	trace_drv_switch_vif_chanctx(local, vifs, n_vifs, mode);
-	ret = local->ops->switch_vif_chanctx(&local->hw,
-					     vifs, n_vifs, mode);
-	trace_drv_return_int(local, ret);
-
-	if (!ret && mode == CHANCTX_SWMODE_SWAP_CONTEXTS) {
-		for (i = 0; i < n_vifs; i++) {
-			struct ieee80211_chanctx *new_ctx =
-				container_of(vifs[i].new_ctx,
-					     struct ieee80211_chanctx,
-					     conf);
-			struct ieee80211_chanctx *old_ctx =
-				container_of(vifs[i].old_ctx,
-					     struct ieee80211_chanctx,
-					     conf);
-
-			new_ctx->driver_present = true;
-			old_ctx->driver_present = false;
-		}
-	}
-
-	return ret;
-}
+int drv_switch_vif_chanctx(struct ieee80211_local *local,
+			   struct ieee80211_vif_chanctx_switch *vifs,
+			   int n_vifs, enum ieee80211_chanctx_switch_mode mode);
 
 static inline int drv_start_ap(struct ieee80211_local *local,
 			       struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 6e52659..f9605f1 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -419,6 +419,8 @@
 	bool downgraded;
 };
 
+DECLARE_EWMA(beacon_signal, 16, 4)
+
 struct ieee80211_if_managed {
 	struct timer_list timer;
 	struct timer_list conn_mon_timer;
@@ -490,16 +492,7 @@
 
 	s16 p2p_noa_index;
 
-	/* Signal strength from the last Beacon frame in the current BSS. */
-	int last_beacon_signal;
-
-	/*
-	 * Weighted average of the signal strength from Beacon frames in the
-	 * current BSS. This is in units of 1/16 of the signal unit to maintain
-	 * accuracy and to speed up calculations, i.e., the value need to be
-	 * divided by 16 to get the actual value.
-	 */
-	int ave_beacon_signal;
+	struct ewma_beacon_signal ave_beacon_signal;
 
 	/*
 	 * Number of Beacon frames used in ave_beacon_signal. This can be used
@@ -535,6 +528,7 @@
 	struct sk_buff *teardown_skb; /* A copy to send through the AP */
 	spinlock_t teardown_lock; /* To lock changing teardown_skb */
 	bool tdls_chan_switch_prohibited;
+	bool tdls_wider_bw_prohibited;
 
 	/* WMM-AC TSPEC support */
 	struct ieee80211_sta_tx_tspec tx_tspec[IEEE80211_NUM_ACS];
@@ -1641,6 +1635,9 @@
 struct sk_buff *
 ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
 			      struct sk_buff *skb, u32 info_flags);
+void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
+			  struct ieee80211_supported_band *sband,
+			  int retry_count, int shift, bool send_to_cooked);
 
 void ieee80211_check_fast_xmit(struct sta_info *sta);
 void ieee80211_check_fast_xmit_all(struct ieee80211_local *local);
@@ -1853,7 +1850,7 @@
 void ieee80211_dynamic_ps_timer(unsigned long data);
 void ieee80211_send_nullfunc(struct ieee80211_local *local,
 			     struct ieee80211_sub_if_data *sdata,
-			     int powersave);
+			     bool powersave);
 void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
 			     struct ieee80211_hdr *hdr);
 void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
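(Editor's note, not part of the patch: DECLARE_EWMA(beacon_signal, 16, 4) generates an ewma_beacon_signal_* helper set that replaces the open-coded 1/16-weighted average previously kept in ave_beacon_signal. The mlme.c conversion below uses it roughly like this — a sketch only; the signal is stored negated so the unsigned average tracks the dBm magnitude:

	struct ewma_beacon_signal avg;
	int sig;

	ewma_beacon_signal_init(&avg);				/* on IEEE80211_STA_RESET_SIGNAL_AVE */
	ewma_beacon_signal_add(&avg, -rx_status->signal);	/* per received beacon frame */
	sig = -ewma_beacon_signal_read(&avg);			/* back to signed dBm for CQM/RSSI events */
)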
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 6964fc6..42d7f0f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1204,7 +1204,7 @@
 	if (!ieee80211_sdata_running(sdata))
 		return;
 
-	if (local->scanning)
+	if (test_bit(SCAN_SW_SCANNING, &local->scanning))
 		return;
 
 	if (!ieee80211_can_run_worker(local))
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index ff79a13..9b813a2 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -543,7 +543,8 @@
 			   NL80211_FEATURE_HT_IBSS |
 			   NL80211_FEATURE_VIF_TXPOWER |
 			   NL80211_FEATURE_MAC_ON_CREATE |
-			   NL80211_FEATURE_USERSPACE_MPM;
+			   NL80211_FEATURE_USERSPACE_MPM |
+			   NL80211_FEATURE_FULL_AP_CLIENT_STATE;
 
 	if (!ops->hw_scan)
 		wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index e06a5ca..626e8de 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -94,6 +94,9 @@
 	ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
 				     ie->ht_operation, &sta_chan_def);
 
+	ieee80211_vht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
+				      ie->vht_operation, &sta_chan_def);
+
 	if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
 					 &sta_chan_def))
 		return false;
@@ -436,8 +439,6 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct ieee80211_channel *channel;
-	enum nl80211_channel_type channel_type =
-		cfg80211_get_chandef_type(&sdata->vif.bss_conf.chandef);
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_sta_ht_cap *ht_cap;
 	u8 *pos;
@@ -454,7 +455,10 @@
 	sband = local->hw.wiphy->bands[channel->band];
 	ht_cap = &sband->ht_cap;
 
-	if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT)
+	if (!ht_cap->ht_supported ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
 		return 0;
 
 	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation))
@@ -467,6 +471,68 @@
 	return 0;
 }
 
+int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata,
+			struct sk_buff *skb)
+{
+	struct ieee80211_local *local = sdata->local;
+	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	struct ieee80211_supported_band *sband;
+	u8 *pos;
+
+	sband = local->hw.wiphy->bands[band];
+	if (!sband->vht_cap.vht_supported ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+		return 0;
+
+	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
+		return -ENOMEM;
+
+	pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_cap));
+	ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, sband->vht_cap.cap);
+
+	return 0;
+}
+
+int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
+			 struct sk_buff *skb)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_channel *channel;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_sta_vht_cap *vht_cap;
+	u8 *pos;
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (WARN_ON(!chanctx_conf)) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	channel = chanctx_conf->def.chan;
+	rcu_read_unlock();
+
+	sband = local->hw.wiphy->bands[channel->band];
+	vht_cap = &sband->vht_cap;
+
+	if (!vht_cap->vht_supported ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+		return 0;
+
+	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_operation))
+		return -ENOMEM;
+
+	pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
+	ieee80211_ie_build_vht_oper(pos, vht_cap,
+				    &sdata->vif.bss_conf.chandef);
+
+	return 0;
+}
+
 static void ieee80211_mesh_path_timer(unsigned long data)
 {
 	struct ieee80211_sub_if_data *sdata =
@@ -540,9 +606,9 @@
  *
  * Return the header length.
  */
-int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata,
-			      struct ieee80211s_hdr *meshhdr,
-			      const char *addr4or5, const char *addr6)
+unsigned int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata,
+				       struct ieee80211s_hdr *meshhdr,
+				       const char *addr4or5, const char *addr6)
 {
 	if (WARN_ON(!addr4or5 && addr6))
 		return 0;
@@ -637,6 +703,8 @@
 		   2 + ifmsh->mesh_id_len +
 		   2 + sizeof(struct ieee80211_meshconf_ie) +
 		   2 + sizeof(__le16) + /* awake window */
+		   2 + sizeof(struct ieee80211_vht_cap) +
+		   2 + sizeof(struct ieee80211_vht_operation) +
 		   ifmsh->ie_len;
 
 	bcn = kzalloc(sizeof(*bcn) + head_len + tail_len, GFP_KERNEL);
@@ -718,6 +786,8 @@
 	    mesh_add_meshid_ie(sdata, skb) ||
 	    mesh_add_meshconf_ie(sdata, skb) ||
 	    mesh_add_awake_window_ie(sdata, skb) ||
+	    mesh_add_vht_cap_ie(sdata, skb) ||
+	    mesh_add_vht_oper_ie(sdata, skb) ||
 	    mesh_add_vendor_ies(sdata, skb))
 		goto out_free;
 
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 50c8473..a159634 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -207,9 +207,9 @@
 /* Various */
 int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
 				  const u8 *da, const u8 *sa);
-int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata,
-			      struct ieee80211s_hdr *meshhdr,
-			      const char *addr4or5, const char *addr6);
+unsigned int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata,
+				       struct ieee80211s_hdr *meshhdr,
+				       const char *addr4or5, const char *addr6);
 int mesh_rmc_check(struct ieee80211_sub_if_data *sdata,
 		   const u8 *addr, struct ieee80211s_hdr *mesh_hdr);
 bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
@@ -227,6 +227,10 @@
 		       struct sk_buff *skb);
 int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata,
 			struct sk_buff *skb);
+int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata,
+			struct sk_buff *skb);
+int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
+			 struct sk_buff *skb);
 void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_init(void);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 5838464..a360b24 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -226,6 +226,8 @@
 			    2 + sizeof(struct ieee80211_meshconf_ie) +
 			    2 + sizeof(struct ieee80211_ht_cap) +
 			    2 + sizeof(struct ieee80211_ht_operation) +
+			    2 + sizeof(struct ieee80211_vht_cap) +
+			    2 + sizeof(struct ieee80211_vht_operation) +
 			    2 + 8 + /* peering IE */
 			    sdata->u.mesh.ie_len);
 	if (!skb)
@@ -306,7 +308,9 @@
 
 	if (action != WLAN_SP_MESH_PEERING_CLOSE) {
 		if (mesh_add_ht_cap_ie(sdata, skb) ||
-		    mesh_add_ht_oper_ie(sdata, skb))
+		    mesh_add_ht_oper_ie(sdata, skb) ||
+		    mesh_add_vht_cap_ie(sdata, skb) ||
+		    mesh_add_vht_oper_ie(sdata, skb))
 			goto free;
 	}
 
@@ -402,6 +406,9 @@
 					      elems->ht_cap_elem, sta))
 		changed |= IEEE80211_RC_BW_CHANGED;
 
+	ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+					    elems->vht_cap_elem, sta);
+
 	if (bw != sta->sta.bandwidth)
 		changed |= IEEE80211_RC_BW_CHANGED;
 
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index cd7e55e..56ef9a8 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -82,13 +82,6 @@
 		 " before disconnecting (reason 4).");
 
 /*
- * Weight given to the latest Beacon frame when calculating average signal
- * strength for Beacon frames received in the current BSS. This must be
- * between 1 and 15.
- */
-#define IEEE80211_SIGNAL_AVE_WEIGHT	3
-
-/*
  * How many Beacon frames need to have been used in average signal strength
  * before starting to indicate signal change events.
  */
@@ -943,7 +936,7 @@
 
 void ieee80211_send_nullfunc(struct ieee80211_local *local,
 			     struct ieee80211_sub_if_data *sdata,
-			     int powersave)
+			     bool powersave)
 {
 	struct sk_buff *skb;
 	struct ieee80211_hdr_3addr *nullfunc;
@@ -1427,7 +1420,7 @@
 			  msecs_to_jiffies(conf->dynamic_ps_timeout));
 	} else {
 		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
-			ieee80211_send_nullfunc(local, sdata, 1);
+			ieee80211_send_nullfunc(local, sdata, true);
 
 		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
 		    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
@@ -1642,7 +1635,7 @@
 				  msecs_to_jiffies(
 				  local->hw.conf.dynamic_ps_timeout));
 		} else {
-			ieee80211_send_nullfunc(local, sdata, 1);
+			ieee80211_send_nullfunc(local, sdata, true);
 			/* Flush to get the tx status of nullfunc frame */
 			ieee80211_flush_queues(local, sdata, false);
 		}
@@ -2275,7 +2268,7 @@
 
 	if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) {
 		ifmgd->nullfunc_failed = false;
-		ieee80211_send_nullfunc(sdata->local, sdata, 0);
+		ieee80211_send_nullfunc(sdata->local, sdata, false);
 	} else {
 		int ssid_len;
 
@@ -3262,16 +3255,6 @@
 	if (ifmgd->associated &&
 	    ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
 		ieee80211_reset_ap_probe(sdata);
-
-	if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
-	    ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) {
-		/* got probe response, continue with auth */
-		sdata_info(sdata, "direct probe responded\n");
-		ifmgd->auth_data->tries = 0;
-		ifmgd->auth_data->timeout = jiffies;
-		ifmgd->auth_data->timeout_started = true;
-		run_again(sdata, ifmgd->auth_data->timeout);
-	}
 }
 
 /*
@@ -3374,24 +3357,21 @@
 	bssid = ifmgd->associated->bssid;
 
 	/* Track average RSSI from the Beacon frames of the current AP */
-	ifmgd->last_beacon_signal = rx_status->signal;
 	if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
 		ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
-		ifmgd->ave_beacon_signal = rx_status->signal * 16;
+		ewma_beacon_signal_init(&ifmgd->ave_beacon_signal);
 		ifmgd->last_cqm_event_signal = 0;
 		ifmgd->count_beacon_signal = 1;
 		ifmgd->last_ave_beacon_signal = 0;
 	} else {
-		ifmgd->ave_beacon_signal =
-			(IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 +
-			 (16 - IEEE80211_SIGNAL_AVE_WEIGHT) *
-			 ifmgd->ave_beacon_signal) / 16;
 		ifmgd->count_beacon_signal++;
 	}
 
+	ewma_beacon_signal_add(&ifmgd->ave_beacon_signal, -rx_status->signal);
+
 	if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold &&
 	    ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
-		int sig = ifmgd->ave_beacon_signal;
+		int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
 		int last_sig = ifmgd->last_ave_beacon_signal;
 		struct ieee80211_event event = {
 			.type = RSSI_EVENT,
@@ -3418,10 +3398,11 @@
 	if (bss_conf->cqm_rssi_thold &&
 	    ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
 	    !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) {
-		int sig = ifmgd->ave_beacon_signal / 16;
+		int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
 		int last_event = ifmgd->last_cqm_event_signal;
 		int thold = bss_conf->cqm_rssi_thold;
 		int hyst = bss_conf->cqm_rssi_hyst;
+
 		if (sig < thold &&
 		    (last_event == 0 || sig < last_event - hyst)) {
 			ifmgd->last_cqm_event_signal = sig;
@@ -3456,31 +3437,27 @@
 					  len - baselen, false, &elems,
 					  care_about_ies, ncrc);
 
-	if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) {
-		bool directed_tim = ieee80211_check_tim(elems.tim,
-							elems.tim_len,
-							ifmgd->aid);
-		if (directed_tim) {
-			if (local->hw.conf.dynamic_ps_timeout > 0) {
-				if (local->hw.conf.flags & IEEE80211_CONF_PS) {
-					local->hw.conf.flags &= ~IEEE80211_CONF_PS;
-					ieee80211_hw_config(local,
-							    IEEE80211_CONF_CHANGE_PS);
-				}
-				ieee80211_send_nullfunc(local, sdata, 0);
-			} else if (!local->pspolling && sdata->u.mgd.powersave) {
-				local->pspolling = true;
-
-				/*
-				 * Here is assumed that the driver will be
-				 * able to send ps-poll frame and receive a
-				 * response even though power save mode is
-				 * enabled, but some drivers might require
-				 * to disable power save here. This needs
-				 * to be investigated.
-				 */
-				ieee80211_send_pspoll(local, sdata);
+	if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
+	    ieee80211_check_tim(elems.tim, elems.tim_len, ifmgd->aid)) {
+		if (local->hw.conf.dynamic_ps_timeout > 0) {
+			if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+				local->hw.conf.flags &= ~IEEE80211_CONF_PS;
+				ieee80211_hw_config(local,
+						    IEEE80211_CONF_CHANGE_PS);
 			}
+			ieee80211_send_nullfunc(local, sdata, false);
+		} else if (!local->pspolling && sdata->u.mgd.powersave) {
+			local->pspolling = true;
+
+			/*
+			 * Here is assumed that the driver will be
+			 * able to send ps-poll frame and receive a
+			 * response even though power save mode is
+			 * enabled, but some drivers might require
+			 * to disable power save here. This needs
+			 * to be investigated.
+			 */
+			ieee80211_send_pspoll(local, sdata);
 		}
 	}
 
@@ -3717,12 +3694,14 @@
 				    reason);
 }
 
-static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
+static int ieee80211_auth(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data;
 	u32 tx_flags = 0;
+	u16 trans = 1;
+	u16 status = 0;
 
 	sdata_assert_lock(sdata);
 
@@ -3746,55 +3725,28 @@
 
 	drv_mgd_prepare_tx(local, sdata);
 
-	if (auth_data->bss->proberesp_ies) {
-		u16 trans = 1;
-		u16 status = 0;
+	sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
+		   auth_data->bss->bssid, auth_data->tries,
+		   IEEE80211_AUTH_MAX_TRIES);
 
-		sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
-			   auth_data->bss->bssid, auth_data->tries,
-			   IEEE80211_AUTH_MAX_TRIES);
+	auth_data->expected_transaction = 2;
 
-		auth_data->expected_transaction = 2;
-
-		if (auth_data->algorithm == WLAN_AUTH_SAE) {
-			trans = auth_data->sae_trans;
-			status = auth_data->sae_status;
-			auth_data->expected_transaction = trans;
-		}
-
-		if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
-			tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
-				   IEEE80211_TX_INTFL_MLME_CONN_TX;
-
-		ieee80211_send_auth(sdata, trans, auth_data->algorithm, status,
-				    auth_data->data, auth_data->data_len,
-				    auth_data->bss->bssid,
-				    auth_data->bss->bssid, NULL, 0, 0,
-				    tx_flags);
-	} else {
-		const u8 *ssidie;
-
-		sdata_info(sdata, "direct probe to %pM (try %d/%i)\n",
-			   auth_data->bss->bssid, auth_data->tries,
-			   IEEE80211_AUTH_MAX_TRIES);
-
-		rcu_read_lock();
-		ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID);
-		if (!ssidie) {
-			rcu_read_unlock();
-			return -EINVAL;
-		}
-		/*
-		 * Direct probe is sent to broadcast address as some APs
-		 * will not answer to direct packet in unassociated state.
-		 */
-		ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL,
-					 ssidie + 2, ssidie[1],
-					 NULL, 0, (u32) -1, true, 0,
-					 auth_data->bss->channel, false);
-		rcu_read_unlock();
+	if (auth_data->algorithm == WLAN_AUTH_SAE) {
+		trans = auth_data->sae_trans;
+		status = auth_data->sae_status;
+		auth_data->expected_transaction = trans;
 	}
 
+	if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
+		tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
+			   IEEE80211_TX_INTFL_MLME_CONN_TX;
+
+	ieee80211_send_auth(sdata, trans, auth_data->algorithm, status,
+			    auth_data->data, auth_data->data_len,
+			    auth_data->bss->bssid,
+			    auth_data->bss->bssid, NULL, 0, 0,
+			    tx_flags);
+
 	if (tx_flags == 0) {
 		auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
 		auth_data->timeout_started = true;
@@ -3874,8 +3826,7 @@
 		bool status_acked = ifmgd->status_acked;
 
 		ifmgd->status_received = false;
-		if (ifmgd->auth_data &&
-		    (ieee80211_is_probe_req(fc) || ieee80211_is_auth(fc))) {
+		if (ifmgd->auth_data && ieee80211_is_auth(fc)) {
 			if (status_acked) {
 				ifmgd->auth_data->timeout =
 					jiffies + IEEE80211_AUTH_TIMEOUT_SHORT;
@@ -3906,7 +3857,7 @@
 			 * so let's just kill the auth data
 			 */
 			ieee80211_destroy_auth_data(sdata, false);
-		} else if (ieee80211_probe_auth(sdata)) {
+		} else if (ieee80211_auth(sdata)) {
 			u8 bssid[ETH_ALEN];
 			struct ieee80211_event event = {
 				.type = MLME_EVENT,
@@ -4613,7 +4564,7 @@
 	if (err)
 		goto err_clear;
 
-	err = ieee80211_probe_auth(sdata);
+	err = ieee80211_auth(sdata);
 	if (err) {
 		sta_info_destroy_addr(sdata, req->bss->bssid);
 		goto err_clear;
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index f2c75cf..0440103 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -57,7 +57,7 @@
 		 * to send a new nullfunc frame to inform the AP that we
 		 * are again sleeping.
 		 */
-		ieee80211_send_nullfunc(local, sdata, 1);
+		ieee80211_send_nullfunc(local, sdata, true);
 }
 
 /* inform AP that we are awake again, unless power save is enabled */
@@ -66,7 +66,7 @@
 	struct ieee80211_local *local = sdata->local;
 
 	if (!local->ps_sdata)
-		ieee80211_send_nullfunc(local, sdata, 0);
+		ieee80211_send_nullfunc(local, sdata, false);
 	else if (local->offchannel_ps_enabled) {
 		/*
 		 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
@@ -93,7 +93,7 @@
 		 * restart the timer now and send a nullfunc frame to inform
 		 * the AP that we are awake.
 		 */
-		ieee80211_send_nullfunc(local, sdata, 0);
+		ieee80211_send_nullfunc(local, sdata, false);
 		mod_timer(&local->dynamic_ps_timer, jiffies +
 			  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
 	}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index b676b9f..ad88ad4 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -23,7 +23,8 @@
 
 	ieee80211_del_virtual_monitor(local);
 
-	if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) {
+	if (ieee80211_hw_check(hw, AMPDU_AGGREGATION) &&
+	    !(wowlan && wowlan->any)) {
 		mutex_lock(&local->sta_mtx);
 		list_for_each_entry(sta, &local->sta_list, list) {
 			set_sta_flag(sta, WLAN_STA_BLOCK_BA);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 9ce8883..b07e2f7 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -305,7 +305,10 @@
 		info->control.rates[0].idx = i;
 		break;
 	}
-	WARN_ON_ONCE(i == sband->n_bitrates);
+	WARN_ONCE(i == sband->n_bitrates,
+		  "no supported rates (0x%x) in rate_mask 0x%x with flags 0x%x\n",
+		  sta ? sta->supp_rates[sband->band] : 0,
+		  rate_mask, rate_flags);
 
 	info->control.rates[0].count =
 		(info->flags & IEEE80211_TX_CTL_NO_ACK) ?
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index 1db5f7c..820b0ab 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -85,12 +85,10 @@
 	file->private_data = ms;
 	p = ms->buf;
 	p += sprintf(p, "\n");
-	p += sprintf(p, "best   __________rate_________    ______"
-			"statistics______    ________last_______    "
-			"______sum-of________\n");
-	p += sprintf(p, "rate  [name idx airtime max_tp]  [ ø(tp) ø(prob) "
-			"sd(prob)]  [prob.|retry|suc|att]  "
-			"[#success | #attempts]\n");
+	p += sprintf(p,
+		     "best   __________rate_________    ________statistics________    ________last_______    ______sum-of________\n");
+	p += sprintf(p,
+		     "rate  [name idx airtime max_tp]  [avg(tp) avg(prob) sd(prob)]  [prob.|retry|suc|att]  [#success | #attempts]\n");
 
 	for (i = 0; i < mi->n_rates; i++) {
 		struct minstrel_rate *mr = &mi->r[i];
@@ -112,7 +110,7 @@
 		prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
 		eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
 
-		p += sprintf(p, "%4u.%1u   %4u.%1u   %3u.%1u    %3u.%1u"
+		p += sprintf(p, "%4u.%1u    %4u.%1u     %3u.%1u    %3u.%1u"
 				"     %3u.%1u %3u   %3u %-3u   "
 				"%9llu   %-9llu\n",
 				tp_max / 10, tp_max % 10,
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index 6822ce0..5320e35 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -86,7 +86,7 @@
 		prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
 		eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
 
-		p += sprintf(p, "%4u.%1u   %4u.%1u   %3u.%1u    %3u.%1u"
+		p += sprintf(p, "%4u.%1u    %4u.%1u     %3u.%1u    %3u.%1u"
 				"     %3u.%1u %3u   %3u %-3u   "
 				"%9llu   %-9llu\n",
 				tp_max / 10, tp_max % 10,
@@ -129,12 +129,10 @@
 	p = ms->buf;
 
 	p += sprintf(p, "\n");
-	p += sprintf(p, "              best   ____________rate__________    "
-			"______statistics______    ________last_______    "
-			"______sum-of________\n");
-	p += sprintf(p, "mode guard #  rate  [name   idx airtime  max_tp]  "
-			"[ ø(tp) ø(prob) sd(prob)]  [prob.|retry|suc|att]  [#success | "
-			"#attempts]\n");
+	p += sprintf(p,
+		     "              best   ____________rate__________    ________statistics________    ________last_______    ______sum-of________\n");
+	p += sprintf(p,
+		     "mode guard #  rate  [name   idx airtime  max_tp]  [avg(tp) avg(prob) sd(prob)]  [prob.|retry|suc|att]  [#success | #attempts]\n");
 
 	p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p);
 	for (i = 0; i < MINSTREL_CCK_GROUP; i++)
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 64f1936..c364445 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -303,7 +303,6 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_hw *hw = &local->hw;
 	struct sta_info *sta;
-	struct timespec uptime;
 	int i;
 
 	sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
@@ -339,8 +338,7 @@
 	/* Mark TID as unreserved */
 	sta->reserved_tid = IEEE80211_TID_UNRESERVED;
 
-	ktime_get_ts(&uptime);
-	sta->last_connected = uptime.tv_sec;
+	sta->last_connected = ktime_get_seconds();
 	ewma_signal_init(&sta->avg_signal);
 	for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
 		ewma_signal_init(&sta->chain_signal_avg[i]);
@@ -1813,7 +1811,6 @@
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct ieee80211_local *local = sdata->local;
 	struct rate_control_ref *ref = NULL;
-	struct timespec uptime;
 	u32 thr = 0;
 	int i, ac;
 
@@ -1838,8 +1835,7 @@
 			 BIT(NL80211_STA_INFO_RX_DROP_MISC) |
 			 BIT(NL80211_STA_INFO_BEACON_LOSS);
 
-	ktime_get_ts(&uptime);
-	sinfo->connected_time = uptime.tv_sec - sta->last_connected;
+	sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
 	sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
 
 	if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index b087c71..d5ded87 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -133,6 +133,7 @@
  * @buf_size: reorder buffer size at receiver
  * @failed_bar_ssn: ssn of the last failed BAR tx attempt
  * @bar_pending: BAR needs to be re-sent
+ * @amsdu: support A-MSDU within A-MPDU
  *
  * This structure's lifetime is managed by RCU, assignments to
  * the array holding it must hold the aggregation mutex.
@@ -158,6 +159,7 @@
 
 	u16 failed_bar_ssn;
 	bool bar_pending;
+	bool amsdu;
 };
 
 /**
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 8ba5832..98fd04c 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -668,16 +668,70 @@
 }
 EXPORT_SYMBOL(ieee80211_tx_status_noskb);
 
-void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
+void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
+			  struct ieee80211_supported_band *sband,
+			  int retry_count, int shift, bool send_to_cooked)
 {
 	struct sk_buff *skb2;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_sub_if_data *sdata;
+	struct net_device *prev_dev = NULL;
+	int rtap_len;
+
+	/* send frame to monitor interfaces now */
+	rtap_len = ieee80211_tx_radiotap_len(info);
+	if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
+		pr_err("ieee80211_tx_status: headroom too small\n");
+		dev_kfree_skb(skb);
+		return;
+	}
+	ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
+					 rtap_len, shift);
+
+	/* XXX: is this sufficient for BPF? */
+	skb_set_mac_header(skb, 0);
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb->pkt_type = PACKET_OTHERHOST;
+	skb->protocol = htons(ETH_P_802_2);
+	memset(skb->cb, 0, sizeof(skb->cb));
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+			if (!ieee80211_sdata_running(sdata))
+				continue;
+
+			if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
+			    !send_to_cooked)
+				continue;
+
+			if (prev_dev) {
+				skb2 = skb_clone(skb, GFP_ATOMIC);
+				if (skb2) {
+					skb2->dev = prev_dev;
+					netif_rx(skb2);
+				}
+			}
+
+			prev_dev = sdata->dev;
+		}
+	}
+	if (prev_dev) {
+		skb->dev = prev_dev;
+		netif_rx(skb);
+		skb = NULL;
+	}
+	rcu_read_unlock();
+	dev_kfree_skb(skb);
+}
+
+void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	__le16 fc;
 	struct ieee80211_supported_band *sband;
-	struct ieee80211_sub_if_data *sdata;
-	struct net_device *prev_dev = NULL;
 	struct sta_info *sta;
 	struct rhash_head *tmp;
 	int retry_count;
@@ -685,7 +739,6 @@
 	bool send_to_cooked;
 	bool acked;
 	struct ieee80211_bar *bar;
-	int rtap_len;
 	int shift = 0;
 	int tid = IEEE80211_NUM_TIDS;
 	const struct bucket_table *tbl;
@@ -878,51 +931,8 @@
 		return;
 	}
 
-	/* send frame to monitor interfaces now */
-	rtap_len = ieee80211_tx_radiotap_len(info);
-	if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
-		pr_err("ieee80211_tx_status: headroom too small\n");
-		dev_kfree_skb(skb);
-		return;
-	}
-	ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
-					 rtap_len, shift);
-
-	/* XXX: is this sufficient for BPF? */
-	skb_set_mac_header(skb, 0);
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	skb->pkt_type = PACKET_OTHERHOST;
-	skb->protocol = htons(ETH_P_802_2);
-	memset(skb->cb, 0, sizeof(skb->cb));
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
-			if (!ieee80211_sdata_running(sdata))
-				continue;
-
-			if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
-			    !send_to_cooked)
-				continue;
-
-			if (prev_dev) {
-				skb2 = skb_clone(skb, GFP_ATOMIC);
-				if (skb2) {
-					skb2->dev = prev_dev;
-					netif_rx(skb2);
-				}
-			}
-
-			prev_dev = sdata->dev;
-		}
-	}
-	if (prev_dev) {
-		skb->dev = prev_dev;
-		netif_rx(skb);
-		skb = NULL;
-	}
-	rcu_read_unlock();
-	dev_kfree_skb(skb);
+	/* send to monitor interfaces */
+	ieee80211_tx_monitor(local, skb, sband, retry_count, shift, send_to_cooked);
 }
 EXPORT_SYMBOL(ieee80211_tx_status);
 
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 4e202d0..ecc5e2a 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -41,9 +41,11 @@
 					 struct sk_buff *skb)
 {
 	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	bool chan_switch = local->hw.wiphy->features &
 			   NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
-	bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW);
+	bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
+			  !ifmgd->tdls_wider_bw_prohibited;
 	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
 	bool vht = sband && sband->vht_cap.vht_supported;
@@ -331,8 +333,8 @@
 
 	/* proceed to downgrade the chandef until usable or the same */
 	while (uc.width > max_width &&
-	       !cfg80211_reg_can_beacon(sdata->local->hw.wiphy,
-					&uc, sdata->wdev.iftype))
+	       !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
+					      sdata->wdev.iftype))
 		ieee80211_chandef_downgrade(&uc);
 
 	if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 6f14591..314e3bd7 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -497,6 +497,36 @@
 	)
 );
 
+TRACE_EVENT(drv_config_iface_filter,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 unsigned int filter_flags,
+		 unsigned int changed_flags),
+
+	TP_ARGS(local, sdata, filter_flags, changed_flags),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		VIF_ENTRY
+		__field(unsigned int, filter_flags)
+		__field(unsigned int, changed_flags)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		VIF_ASSIGN;
+		__entry->filter_flags = filter_flags;
+		__entry->changed_flags = changed_flags;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT VIF_PR_FMT
+		" filter_flags: %#x changed_flags: %#x",
+		LOCAL_PR_ARG, VIF_PR_ARG, __entry->filter_flags,
+		__entry->changed_flags
+	)
+);
+
 TRACE_EVENT(drv_set_tim,
 	TP_PROTO(struct ieee80211_local *local,
 		 struct ieee80211_sta *sta, bool set),
@@ -944,9 +974,9 @@
 		 struct ieee80211_sub_if_data *sdata,
 		 enum ieee80211_ampdu_mlme_action action,
 		 struct ieee80211_sta *sta, u16 tid,
-		 u16 *ssn, u8 buf_size),
+		 u16 *ssn, u8 buf_size, bool amsdu),
 
-	TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size),
+	TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size, amsdu),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
@@ -955,6 +985,7 @@
 		__field(u16, tid)
 		__field(u16, ssn)
 		__field(u8, buf_size)
+		__field(bool, amsdu)
 		VIF_ENTRY
 	),
 
@@ -966,12 +997,13 @@
 		__entry->tid = tid;
 		__entry->ssn = ssn ? *ssn : 0;
 		__entry->buf_size = buf_size;
+		__entry->amsdu = amsdu;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d",
+		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d amsdu:%d",
 		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action,
-		__entry->tid, __entry->buf_size
+		__entry->tid, __entry->buf_size, __entry->amsdu
 	)
 );
 
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 84e0e8c..464ba1a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2767,7 +2767,8 @@
 
 	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
 		*ieee80211_get_qos_ctl(hdr) = tid;
-		hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
+		if (!sta->sta.txq[0])
+			hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
 	} else {
 		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
 		hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
@@ -3512,6 +3513,12 @@
 {
 	struct ieee80211_mutable_offsets offs = {};
 	struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false);
+	struct sk_buff *copy;
+	struct ieee80211_supported_band *sband;
+	int shift;
+
+	if (!bcn)
+		return bcn;
 
 	if (tim_offset)
 		*tim_offset = offs.tim_offset;
@@ -3519,6 +3526,19 @@
 	if (tim_length)
 		*tim_length = offs.tim_length;
 
+	if (ieee80211_hw_check(hw, BEACON_TX_STATUS) ||
+	    !hw_to_local(hw)->monitors)
+		return bcn;
+
+	/* send a copy to monitor interfaces */
+	copy = skb_copy(bcn, GFP_ATOMIC);
+	if (!copy)
+		return bcn;
+
+	shift = ieee80211_vif_get_shift(vif);
+	sband = hw->wiphy->bands[ieee80211_get_sdata_band(vif_to_sdata(vif))];
+	ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false);
+
 	return bcn;
 }
 EXPORT_SYMBOL(ieee80211_beacon_get_tim);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 1104421..60c4dbf 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1966,7 +1966,7 @@
 			if (!sdata->u.mgd.associated)
 				continue;
 
-			ieee80211_send_nullfunc(local, sdata, 0);
+			ieee80211_send_nullfunc(local, sdata, false);
 		}
 	}
 
@@ -2017,8 +2017,9 @@
 		mutex_lock(&local->sta_mtx);
 
 		list_for_each_entry(sta, &local->sta_list, list) {
-			ieee80211_sta_tear_down_BA_sessions(
-					sta, AGG_STOP_LOCAL_REQUEST);
+			if (!local->resuming)
+				ieee80211_sta_tear_down_BA_sessions(
+						sta, AGG_STOP_LOCAL_REQUEST);
 			clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
 		}
 
@@ -2324,6 +2325,8 @@
 	if (chandef->center_freq2)
 		vht_oper->center_freq_seg2_idx =
 			ieee80211_frequency_to_channel(chandef->center_freq2);
+	else
+		vht_oper->center_freq_seg2_idx = 0x00;
 
 	switch (chandef->width) {
 	case NL80211_CHAN_WIDTH_160:
@@ -2541,7 +2544,7 @@
 		/* non-managed type interfaces */
 		return 0;
 	}
-	return ifmgd->ave_beacon_signal / 16;
+	return -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
 }
 EXPORT_SYMBOL_GPL(ieee80211_ave_rssi);
 
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index c865ebb..57b5e94 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -266,6 +266,195 @@
 	return 0;
 }
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+static void
+ieee802154_get_llsec_table(struct wpan_phy *wpan_phy,
+			   struct wpan_dev *wpan_dev,
+			   struct ieee802154_llsec_table **table)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+
+	*table = &sdata->sec.table;
+}
+
+static void
+ieee802154_lock_llsec_table(struct wpan_phy *wpan_phy,
+			    struct wpan_dev *wpan_dev)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+
+	mutex_lock(&sdata->sec_mtx);
+}
+
+static void
+ieee802154_unlock_llsec_table(struct wpan_phy *wpan_phy,
+			      struct wpan_dev *wpan_dev)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+
+	mutex_unlock(&sdata->sec_mtx);
+}
+
+static int
+ieee802154_set_llsec_params(struct wpan_phy *wpan_phy,
+			    struct wpan_dev *wpan_dev,
+			    const struct ieee802154_llsec_params *params,
+			    int changed)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_set_params(&sdata->sec, params, changed);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_get_llsec_params(struct wpan_phy *wpan_phy,
+			    struct wpan_dev *wpan_dev,
+			    struct ieee802154_llsec_params *params)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_get_params(&sdata->sec, params);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_add_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+			 const struct ieee802154_llsec_key_id *id,
+			 const struct ieee802154_llsec_key *key)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_key_add(&sdata->sec, id, key);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_del_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+			 const struct ieee802154_llsec_key_id *id)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_key_del(&sdata->sec, id);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_add_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+			const struct ieee802154_llsec_seclevel *sl)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_seclevel_add(&sdata->sec, sl);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_del_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+			const struct ieee802154_llsec_seclevel *sl)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_seclevel_del(&sdata->sec, sl);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_add_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+		      const struct ieee802154_llsec_device *dev_desc)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_dev_add(&sdata->sec, dev_desc);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_del_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+		      __le64 extended_addr)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_dev_del(&sdata->sec, extended_addr);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_add_devkey(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+		      __le64 extended_addr,
+		      const struct ieee802154_llsec_device_key *key)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_devkey_add(&sdata->sec, extended_addr, key);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_del_devkey(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+		      __le64 extended_addr,
+		      const struct ieee802154_llsec_device_key *key)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_devkey_del(&sdata->sec, extended_addr, key);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 const struct cfg802154_ops mac802154_config_ops = {
 	.add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
 	.del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
@@ -284,4 +473,20 @@
 	.set_max_frame_retries = ieee802154_set_max_frame_retries,
 	.set_lbt_mode = ieee802154_set_lbt_mode,
 	.set_ackreq_default = ieee802154_set_ackreq_default,
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	.get_llsec_table = ieee802154_get_llsec_table,
+	.lock_llsec_table = ieee802154_lock_llsec_table,
+	.unlock_llsec_table = ieee802154_unlock_llsec_table,
+	/* TODO above */
+	.set_llsec_params = ieee802154_set_llsec_params,
+	.get_llsec_params = ieee802154_get_llsec_params,
+	.add_llsec_key = ieee802154_add_llsec_key,
+	.del_llsec_key = ieee802154_del_llsec_key,
+	.add_seclevel = ieee802154_add_seclevel,
+	.del_seclevel = ieee802154_del_seclevel,
+	.add_device = ieee802154_add_device,
+	.del_device = ieee802154_del_device,
+	.add_devkey = ieee802154_add_devkey,
+	.del_devkey = ieee802154_del_devkey,
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 };
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index ed26952..7079cd3 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -367,12 +367,11 @@
 	return 0;
 }
 
-static int mac802154_header_create(struct sk_buff *skb,
-				   struct net_device *dev,
-				   unsigned short type,
-				   const void *daddr,
-				   const void *saddr,
-				   unsigned len)
+static int ieee802154_header_create(struct sk_buff *skb,
+				    struct net_device *dev,
+				    const struct ieee802154_addr *daddr,
+				    const struct ieee802154_addr *saddr,
+				    unsigned len)
 {
 	struct ieee802154_hdr hdr;
 	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
@@ -423,24 +422,89 @@
 	return hlen;
 }
 
+static const struct wpan_dev_header_ops ieee802154_header_ops = {
+	.create		= ieee802154_header_create,
+};
+
+/* This header create functionality assumes an 8 byte array at most for
+ * the source and destination pointers. To adapt this to the 802.15.4
+ * data frame header we use extended address handling only and assume an
+ * intra-PAN connection; the fc fields are mostly fallback handling. This
+ * provides dev_hard_header for dgram sockets.
+ */
+static int mac802154_header_create(struct sk_buff *skb,
+				   struct net_device *dev,
+				   unsigned short type,
+				   const void *daddr,
+				   const void *saddr,
+				   unsigned len)
+{
+	struct ieee802154_hdr hdr;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+	struct ieee802154_mac_cb cb = { };
+	int hlen;
+
+	if (!daddr)
+		return -EINVAL;
+
+	memset(&hdr.fc, 0, sizeof(hdr.fc));
+	hdr.fc.type = IEEE802154_FC_TYPE_DATA;
+	hdr.fc.ack_request = wpan_dev->ackreq;
+	hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;
+
+	/* TODO currently a workaround to give a zeroed cb block to set
+	 * security parameter defaults according to the MIB.
+	 */
+	if (mac802154_set_header_security(sdata, &hdr, &cb) < 0)
+		return -EINVAL;
+
+	hdr.dest.pan_id = wpan_dev->pan_id;
+	hdr.dest.mode = IEEE802154_ADDR_LONG;
+	ieee802154_be64_to_le64(&hdr.dest.extended_addr, daddr);
+
+	hdr.source.pan_id = hdr.dest.pan_id;
+	hdr.source.mode = IEEE802154_ADDR_LONG;
+
+	if (!saddr)
+		hdr.source.extended_addr = wpan_dev->extended_addr;
+	else
+		ieee802154_be64_to_le64(&hdr.source.extended_addr, saddr);
+
+	hlen = ieee802154_hdr_push(skb, &hdr);
+	if (hlen < 0)
+		return -EINVAL;
+
+	skb_reset_mac_header(skb);
+	skb->mac_len = hlen;
+
+	if (len > ieee802154_max_payload(&hdr))
+		return -EMSGSIZE;
+
+	return hlen;
+}
+
 static int
 mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 {
 	struct ieee802154_hdr hdr;
-	struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr;
 
 	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) {
 		pr_debug("malformed packet\n");
 		return 0;
 	}
 
-	*addr = hdr.source;
-	return sizeof(*addr);
+	if (hdr.source.mode == IEEE802154_ADDR_LONG) {
+		ieee802154_le64_to_be64(haddr, &hdr.source.extended_addr);
+		return IEEE802154_EXTENDED_ADDR_LEN;
+	}
+
+	return 0;
 }
 
-static struct header_ops mac802154_header_ops = {
-	.create		= mac802154_header_create,
-	.parse		= mac802154_header_parse,
+static const struct header_ops mac802154_header_ops = {
+	.create         = mac802154_header_create,
+	.parse          = mac802154_header_parse,
 };
 
 static const struct net_device_ops mac802154_wpan_ops = {
@@ -471,9 +535,29 @@
 	dev->addr_len		= IEEE802154_EXTENDED_ADDR_LEN;
 	memset(dev->broadcast, 0xff, IEEE802154_EXTENDED_ADDR_LEN);
 
-	dev->hard_header_len	= MAC802154_FRAME_HARD_HEADER_LEN;
-	dev->needed_tailroom	= 2 + 16; /* FCS + MIC */
-	dev->mtu		= IEEE802154_MTU;
+	/* Let hard_header_len be set to IEEE802154_MIN_HEADER_LEN. AF_PACKET
+	 * will not send frames without any payload, but ack frames have no
+	 * payload, so subtract one so that we can send a 3 byte frame. The
+	 * xmit callback assumes at least a hard header where the two byte fc
+	 * and the sequence field are set.
+	 */
+	dev->hard_header_len	= IEEE802154_MIN_HEADER_LEN - 1;
+	/* The auth_tag header is for security and is placed in the private
+	 * payload room of the mac frame, between the payload and the FCS.
+	 */
+	dev->needed_tailroom	= IEEE802154_MAX_AUTH_TAG_LEN +
+				  IEEE802154_FCS_LEN;
+	/* The mtu size is the payload without the mac header in this case.
+	 * We have a dynamic length header with a minimum header length
+	 * which is hard_header_len. Here we set the mtu to the maximum
+	 * payload size, which is IEEE802154_MTU - IEEE802154_FCS_LEN -
+	 * hard_header_len. The FCS is set by hardware or ndo_start_xmit,
+	 * and the minimum mac header can be evaluated inside the driver
+	 * layer. Any part of the mac header beyond hard_header_len is
+	 * counted as payload.
+	 */
+	dev->mtu		= IEEE802154_MTU - IEEE802154_FCS_LEN -
+				  dev->hard_header_len;
 	dev->tx_queue_len	= 300;
 	dev->flags		= IFF_NOARP | IFF_BROADCAST;
 }
@@ -513,6 +597,7 @@
 		sdata->dev->netdev_ops = &mac802154_wpan_ops;
 		sdata->dev->ml_priv = &mac802154_mlme_wpan;
 		wpan_dev->promiscuous_mode = false;
+		wpan_dev->header_ops = &ieee802154_header_ops;
 
 		mutex_init(&sdata->sec_mtx);
 
@@ -550,7 +635,8 @@
 	if (!ndev)
 		return ERR_PTR(-ENOMEM);
 
-	ndev->needed_headroom = local->hw.extra_tx_headroom;
+	ndev->needed_headroom = local->hw.extra_tx_headroom +
+				IEEE802154_MAX_HEADER_LEN;
 
 	ret = dev_alloc_name(ndev, ndev->name);
 	if (ret < 0)
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 985e939..7799d3c 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -401,6 +401,7 @@
 
 	hash_del_rcu(&pos->bucket_s);
 	hash_del_rcu(&pos->bucket_hw);
+	list_del_rcu(&pos->dev.list);
 	call_rcu(&pos->rcu, llsec_dev_free_rcu);
 
 	return 0;
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index d1c33c1..42e9672 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -87,6 +87,10 @@
 
 	skb->dev = sdata->dev;
 
+	/* TODO this should be moved after the netif_receive_skb call,
+	 * otherwise wireshark will show a mac header with security fields
+	 * although the payload is already decrypted.
+	 */
 	rc = mac802154_llsec_decrypt(&sdata->sec, skb);
 	if (rc) {
 		pr_debug("decryption failed: %i\n", rc);
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 7ed4391..3827f35 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -77,9 +77,6 @@
 		put_unaligned_le16(crc, skb_put(skb, 2));
 	}
 
-	if (skb_cow_head(skb, local->hw.extra_tx_headroom))
-		goto err_tx;
-
 	/* Stop the netif queue on each sub_if_data object. */
 	ieee802154_stop_queue(&local->hw);
 
@@ -121,6 +118,10 @@
 	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
 	int rc;
 
+	/* TODO we should move this to the wpan_dev_hard_header and
+	 * dev_hard_header functions, otherwise wireshark will show a mac
+	 * header with security fields although the payload is not encrypted.
+	 */
 	rc = mac802154_llsec_encrypt(&sdata->sec, skb);
 	if (rc) {
 		netdev_warn(dev, "encryption failed: %i\n", rc);
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 21e70bc..67591ae 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -37,7 +37,7 @@
 	return en->labels * sizeof(struct mpls_shim_hdr);
 }
 
-int mpls_output(struct sock *sk, struct sk_buff *skb)
+int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct mpls_iptunnel_encap *tun_encap_info;
 	struct mpls_shim_hdr *hdr;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 8e47f81..2e90733 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -269,7 +269,7 @@
 		/* Optimization: we don't need to hold module
 		   reference here, since function can't sleep. --RR */
 repeat:
-		verdict = (*elemp)->hook(*elemp, skb, state);
+		verdict = (*elemp)->hook((*elemp)->priv, skb, state);
 		if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
 			if (unlikely((verdict & NF_VERDICT_MASK)
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 338b404..69ab9c26 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -519,8 +519,7 @@
 ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
 	    const struct xt_action_param *par, struct ip_set_adt_opt *opt)
 {
-	struct ip_set *set = ip_set_rcu_get(
-			dev_net(par->in ? par->in : par->out), index);
+	struct ip_set *set = ip_set_rcu_get(par->net, index);
 	int ret = 0;
 
 	BUG_ON(!set);
@@ -558,8 +557,7 @@
 ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
 	   const struct xt_action_param *par, struct ip_set_adt_opt *opt)
 {
-	struct ip_set *set = ip_set_rcu_get(
-			dev_net(par->in ? par->in : par->out), index);
+	struct ip_set *set = ip_set_rcu_get(par->net, index);
 	int ret;
 
 	BUG_ON(!set);
@@ -581,8 +579,7 @@
 ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
 	   const struct xt_action_param *par, struct ip_set_adt_opt *opt)
 {
-	struct ip_set *set = ip_set_rcu_get(
-			dev_net(par->in ? par->in : par->out), index);
+	struct ip_set *set = ip_set_rcu_get(par->net, index);
 	int ret = 0;
 
 	BUG_ON(!set);
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index dfd7b65..0328f72 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -75,7 +75,7 @@
  *	Allocate/initialize app incarnation and register it in proto apps.
  */
 static int
-ip_vs_app_inc_new(struct net *net, struct ip_vs_app *app, __u16 proto,
+ip_vs_app_inc_new(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
 		  __u16 port)
 {
 	struct ip_vs_protocol *pp;
@@ -107,7 +107,7 @@
 		}
 	}
 
-	ret = pp->register_app(net, inc);
+	ret = pp->register_app(ipvs, inc);
 	if (ret)
 		goto out;
 
@@ -127,7 +127,7 @@
  *	Release app incarnation
  */
 static void
-ip_vs_app_inc_release(struct net *net, struct ip_vs_app *inc)
+ip_vs_app_inc_release(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
 	struct ip_vs_protocol *pp;
 
@@ -135,7 +135,7 @@
 		return;
 
 	if (pp->unregister_app)
-		pp->unregister_app(net, inc);
+		pp->unregister_app(ipvs, inc);
 
 	IP_VS_DBG(9, "%s App %s:%u unregistered\n",
 		  pp->name, inc->name, ntohs(inc->port));
@@ -175,14 +175,14 @@
  *	Register an application incarnation in protocol applications
  */
 int
-register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
 		       __u16 port)
 {
 	int result;
 
 	mutex_lock(&__ip_vs_app_mutex);
 
-	result = ip_vs_app_inc_new(net, app, proto, port);
+	result = ip_vs_app_inc_new(ipvs, app, proto, port);
 
 	mutex_unlock(&__ip_vs_app_mutex);
 
@@ -191,15 +191,11 @@
 
 
 /* Register application for netns */
-struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app)
+struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_app *a;
 	int err = 0;
 
-	if (!ipvs)
-		return ERR_PTR(-ENOENT);
-
 	mutex_lock(&__ip_vs_app_mutex);
 
 	list_for_each_entry(a, &ipvs->app_list, a_list) {
@@ -230,21 +226,17 @@
  *	We are sure there are no app incarnations attached to services
  *	Caller should use synchronize_rcu() or rcu_barrier()
  */
-void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
+void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_app *a, *anxt, *inc, *nxt;
 
-	if (!ipvs)
-		return;
-
 	mutex_lock(&__ip_vs_app_mutex);
 
 	list_for_each_entry_safe(a, anxt, &ipvs->app_list, a_list) {
 		if (app && strcmp(app->name, a->name))
 			continue;
 		list_for_each_entry_safe(inc, nxt, &a->incs_list, a_list) {
-			ip_vs_app_inc_release(net, inc);
+			ip_vs_app_inc_release(ipvs, inc);
 		}
 
 		list_del(&a->a_list);
@@ -611,17 +603,19 @@
 };
 #endif
 
-int __net_init ip_vs_app_net_init(struct net *net)
+int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct net *net = ipvs->net;
 
 	INIT_LIST_HEAD(&ipvs->app_list);
 	proc_create("ip_vs_app", 0, net->proc_net, &ip_vs_app_fops);
 	return 0;
 }
 
-void __net_exit ip_vs_app_net_cleanup(struct net *net)
+void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs)
 {
-	unregister_ip_vs_app(net, NULL /* all */);
+	struct net *net = ipvs->net;
+
+	unregister_ip_vs_app(ipvs, NULL /* all */);
 	remove_proc_entry("ip_vs_app", net->proc_net);
 }
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index b0f7b62..d1d168c 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -108,7 +108,7 @@
 /*
  *	Returns hash value for IPVS connection entry
  */
-static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned int proto,
+static unsigned int ip_vs_conn_hashkey(struct netns_ipvs *ipvs, int af, unsigned int proto,
 				       const union nf_inet_addr *addr,
 				       __be16 port)
 {
@@ -116,11 +116,11 @@
 	if (af == AF_INET6)
 		return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
 				    (__force u32)port, proto, ip_vs_conn_rnd) ^
-			((size_t)net>>8)) & ip_vs_conn_tab_mask;
+			((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
 #endif
 	return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
 			    ip_vs_conn_rnd) ^
-		((size_t)net>>8)) & ip_vs_conn_tab_mask;
+		((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
 }
 
 static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
@@ -141,14 +141,14 @@
 		port = p->vport;
 	}
 
-	return ip_vs_conn_hashkey(p->net, p->af, p->protocol, addr, port);
+	return ip_vs_conn_hashkey(p->ipvs, p->af, p->protocol, addr, port);
 }
 
 static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
 {
 	struct ip_vs_conn_param p;
 
-	ip_vs_conn_fill_param(ip_vs_conn_net(cp), cp->af, cp->protocol,
+	ip_vs_conn_fill_param(cp->ipvs, cp->af, cp->protocol,
 			      &cp->caddr, cp->cport, NULL, 0, &p);
 
 	if (cp->pe) {
@@ -279,7 +279,7 @@
 		    ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
 		    ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
 		    p->protocol == cp->protocol &&
-		    ip_vs_conn_net_eq(cp, p->net)) {
+		    cp->ipvs == p->ipvs) {
 			if (!__ip_vs_conn_get(cp))
 				continue;
 			/* HIT */
@@ -314,33 +314,34 @@
 }
 
 static int
-ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb,
+ip_vs_conn_fill_param_proto(struct netns_ipvs *ipvs,
+			    int af, const struct sk_buff *skb,
 			    const struct ip_vs_iphdr *iph,
-			    int inverse, struct ip_vs_conn_param *p)
+			    struct ip_vs_conn_param *p)
 {
 	__be16 _ports[2], *pptr;
-	struct net *net = skb_net(skb);
 
 	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
 	if (pptr == NULL)
 		return 1;
 
-	if (likely(!inverse))
-		ip_vs_conn_fill_param(net, af, iph->protocol, &iph->saddr,
+	if (likely(!ip_vs_iph_inverse(iph)))
+		ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->saddr,
 				      pptr[0], &iph->daddr, pptr[1], p);
 	else
-		ip_vs_conn_fill_param(net, af, iph->protocol, &iph->daddr,
+		ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->daddr,
 				      pptr[1], &iph->saddr, pptr[0], p);
 	return 0;
 }
 
 struct ip_vs_conn *
-ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
-			const struct ip_vs_iphdr *iph, int inverse)
+ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
+			const struct sk_buff *skb,
+			const struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_conn_param p;
 
-	if (ip_vs_conn_fill_param_proto(af, skb, iph, inverse, &p))
+	if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
 		return NULL;
 
 	return ip_vs_conn_in_get(&p);
@@ -359,7 +360,7 @@
 
 	hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
 		if (unlikely(p->pe_data && p->pe->ct_match)) {
-			if (!ip_vs_conn_net_eq(cp, p->net))
+			if (cp->ipvs != p->ipvs)
 				continue;
 			if (p->pe == cp->pe && p->pe->ct_match(p, cp)) {
 				if (__ip_vs_conn_get(cp))
@@ -377,7 +378,7 @@
 		    p->vport == cp->vport && p->cport == cp->cport &&
 		    cp->flags & IP_VS_CONN_F_TEMPLATE &&
 		    p->protocol == cp->protocol &&
-		    ip_vs_conn_net_eq(cp, p->net)) {
+		    cp->ipvs == p->ipvs) {
 			if (__ip_vs_conn_get(cp))
 				goto out;
 		}
@@ -418,7 +419,7 @@
 		    ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
 		    ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
 		    p->protocol == cp->protocol &&
-		    ip_vs_conn_net_eq(cp, p->net)) {
+		    cp->ipvs == p->ipvs) {
 			if (!__ip_vs_conn_get(cp))
 				continue;
 			/* HIT */
@@ -439,12 +440,13 @@
 }
 
 struct ip_vs_conn *
-ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
-			 const struct ip_vs_iphdr *iph, int inverse)
+ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
+			 const struct sk_buff *skb,
+			 const struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_conn_param p;
 
-	if (ip_vs_conn_fill_param_proto(af, skb, iph, inverse, &p))
+	if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
 		return NULL;
 
 	return ip_vs_conn_out_get(&p);
@@ -638,7 +640,7 @@
 	 * so we can make the assumption that the svc_af is the same as the
 	 * dest_af
 	 */
-	dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, cp->af, &cp->daddr,
+	dest = ip_vs_find_dest(cp->ipvs, cp->af, cp->af, &cp->daddr,
 			       cp->dport, &cp->vaddr, cp->vport,
 			       cp->protocol, cp->fwmark, cp->flags);
 	if (dest) {
@@ -668,7 +670,7 @@
 #endif
 			ip_vs_bind_xmit(cp);
 
-		pd = ip_vs_proto_data_get(ip_vs_conn_net(cp), cp->protocol);
+		pd = ip_vs_proto_data_get(cp->ipvs, cp->protocol);
 		if (pd && atomic_read(&pd->appcnt))
 			ip_vs_bind_app(cp, pd->pp);
 	}
@@ -746,7 +748,7 @@
 int ip_vs_check_template(struct ip_vs_conn *ct)
 {
 	struct ip_vs_dest *dest = ct->dest;
-	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(ct));
+	struct netns_ipvs *ipvs = ct->ipvs;
 
 	/*
 	 * Checking the dest server status.
@@ -800,8 +802,7 @@
 static void ip_vs_conn_expire(unsigned long data)
 {
 	struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
-	struct net *net = ip_vs_conn_net(cp);
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct netns_ipvs *ipvs = cp->ipvs;
 
 	/*
 	 *	do I control anybody?
@@ -847,7 +848,7 @@
 	cp->timeout = 60*HZ;
 
 	if (ipvs->sync_state & IP_VS_STATE_MASTER)
-		ip_vs_sync_conn(net, cp, sysctl_sync_threshold(ipvs));
+		ip_vs_sync_conn(ipvs, cp, sysctl_sync_threshold(ipvs));
 
 	ip_vs_conn_put(cp);
 }
@@ -875,8 +876,8 @@
 	       struct ip_vs_dest *dest, __u32 fwmark)
 {
 	struct ip_vs_conn *cp;
-	struct netns_ipvs *ipvs = net_ipvs(p->net);
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->net,
+	struct netns_ipvs *ipvs = p->ipvs;
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->ipvs,
 							   p->protocol);
 
 	cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
@@ -887,7 +888,7 @@
 
 	INIT_HLIST_NODE(&cp->c_list);
 	setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
-	ip_vs_conn_net_set(cp, p->net);
+	cp->ipvs	   = ipvs;
 	cp->af		   = p->af;
 	cp->daf		   = dest_af;
 	cp->protocol	   = p->protocol;
@@ -1061,7 +1062,7 @@
 		size_t len = 0;
 		char dbuf[IP_VS_ADDRSTRLEN];
 
-		if (!ip_vs_conn_net_eq(cp, net))
+		if (!net_eq(cp->ipvs->net, net))
 			return 0;
 		if (cp->pe_data) {
 			pe_data[0] = ' ';
@@ -1146,7 +1147,7 @@
 		const struct ip_vs_conn *cp = v;
 		struct net *net = seq_file_net(seq);
 
-		if (!ip_vs_conn_net_eq(cp, net))
+		if (!net_eq(cp->ipvs->net, net))
 			return 0;
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1240,7 +1241,7 @@
 }
 
 /* Called from keventd and must protect itself from softirqs */
-void ip_vs_random_dropentry(struct net *net)
+void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
 {
 	int idx;
 	struct ip_vs_conn *cp, *cp_c;
@@ -1256,7 +1257,7 @@
 			if (cp->flags & IP_VS_CONN_F_TEMPLATE)
 				/* connection template */
 				continue;
-			if (!ip_vs_conn_net_eq(cp, net))
+			if (cp->ipvs != ipvs)
 				continue;
 			if (cp->protocol == IPPROTO_TCP) {
 				switch(cp->state) {
@@ -1308,18 +1309,17 @@
 /*
  *      Flush all the connection entries in the ip_vs_conn_tab
  */
-static void ip_vs_conn_flush(struct net *net)
+static void ip_vs_conn_flush(struct netns_ipvs *ipvs)
 {
 	int idx;
 	struct ip_vs_conn *cp, *cp_c;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 flush_again:
 	rcu_read_lock();
 	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
 
 		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
-			if (!ip_vs_conn_net_eq(cp, net))
+			if (cp->ipvs != ipvs)
 				continue;
 			IP_VS_DBG(4, "del connection\n");
 			ip_vs_conn_expire_now(cp);
@@ -1345,9 +1345,9 @@
 /*
  * per netns init and exit
  */
-int __net_init ip_vs_conn_net_init(struct net *net)
+int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct net *net = ipvs->net;
 
 	atomic_set(&ipvs->conn_count, 0);
 
@@ -1356,10 +1356,12 @@
 	return 0;
 }
 
-void __net_exit ip_vs_conn_net_cleanup(struct net *net)
+void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs)
 {
+	struct net *net = ipvs->net;
+
 	/* flush all the connection entries first */
-	ip_vs_conn_flush(net);
+	ip_vs_conn_flush(ipvs);
 	remove_proc_entry("ip_vs_conn", net->proc_net);
 	remove_proc_entry("ip_vs_conn_sync", net->proc_net);
 }
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 38fbc19..07a791e 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -112,7 +112,7 @@
 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 {
 	struct ip_vs_dest *dest = cp->dest;
-	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+	struct netns_ipvs *ipvs = cp->ipvs;
 
 	if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
 		struct ip_vs_cpu_stats *s;
@@ -146,7 +146,7 @@
 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 {
 	struct ip_vs_dest *dest = cp->dest;
-	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+	struct netns_ipvs *ipvs = cp->ipvs;
 
 	if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
 		struct ip_vs_cpu_stats *s;
@@ -179,7 +179,7 @@
 static inline void
 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
 {
-	struct netns_ipvs *ipvs = net_ipvs(svc->net);
+	struct netns_ipvs *ipvs = svc->ipvs;
 	struct ip_vs_cpu_stats *s;
 
 	s = this_cpu_ptr(cp->dest->stats.cpustats);
@@ -215,7 +215,7 @@
 			      const union nf_inet_addr *vaddr, __be16 vport,
 			      struct ip_vs_conn_param *p)
 {
-	ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
+	ip_vs_conn_fill_param(svc->ipvs, svc->af, protocol, caddr, cport, vaddr,
 			      vport, p);
 	p->pe = rcu_dereference(svc->pe);
 	if (p->pe && p->pe->fill_param)
@@ -245,20 +245,30 @@
 	const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
 	union nf_inet_addr snet;	/* source network of the client,
 					   after masking */
+	const union nf_inet_addr *src_addr, *dst_addr;
+
+	if (likely(!ip_vs_iph_inverse(iph))) {
+		src_addr = &iph->saddr;
+		dst_addr = &iph->daddr;
+	} else {
+		src_addr = &iph->daddr;
+		dst_addr = &iph->saddr;
+	}
+
 
 	/* Mask saddr with the netmask to adjust template granularity */
 #ifdef CONFIG_IP_VS_IPV6
 	if (svc->af == AF_INET6)
-		ipv6_addr_prefix(&snet.in6, &iph->saddr.in6,
+		ipv6_addr_prefix(&snet.in6, &src_addr->in6,
 				 (__force __u32) svc->netmask);
 	else
 #endif
-		snet.ip = iph->saddr.ip & svc->netmask;
+		snet.ip = src_addr->ip & svc->netmask;
 
 	IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
 		      "mnet %s\n",
-		      IP_VS_DBG_ADDR(svc->af, &iph->saddr), ntohs(src_port),
-		      IP_VS_DBG_ADDR(svc->af, &iph->daddr), ntohs(dst_port),
+		      IP_VS_DBG_ADDR(svc->af, src_addr), ntohs(src_port),
+		      IP_VS_DBG_ADDR(svc->af, dst_addr), ntohs(dst_port),
 		      IP_VS_DBG_ADDR(svc->af, &snet));
 
 	/*
@@ -276,7 +286,7 @@
 	 */
 	{
 		int protocol = iph->protocol;
-		const union nf_inet_addr *vaddr = &iph->daddr;
+		const union nf_inet_addr *vaddr = dst_addr;
 		__be16 vport = 0;
 
 		if (dst_port == svc->port) {
@@ -366,8 +376,8 @@
 	/*
 	 *    Create a new connection according to the template
 	 */
-	ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr,
-			      src_port, &iph->daddr, dst_port, &param);
+	ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol, src_addr,
+			      src_port, dst_addr, dst_port, &param);
 
 	cp = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, flags, dest,
 			    skb->mark);
@@ -418,7 +428,8 @@
 	struct ip_vs_conn *cp = NULL;
 	struct ip_vs_scheduler *sched;
 	struct ip_vs_dest *dest;
-	__be16 _ports[2], *pptr;
+	__be16 _ports[2], *pptr, cport, vport;
+	const void *caddr, *vaddr;
 	unsigned int flags;
 
 	*ignored = 1;
@@ -429,14 +440,26 @@
 	if (pptr == NULL)
 		return NULL;
 
+	if (likely(!ip_vs_iph_inverse(iph))) {
+		cport = pptr[0];
+		caddr = &iph->saddr;
+		vport = pptr[1];
+		vaddr = &iph->daddr;
+	} else {
+		cport = pptr[1];
+		caddr = &iph->daddr;
+		vport = pptr[0];
+		vaddr = &iph->saddr;
+	}
+
 	/*
 	 * FTPDATA needs this check when using local real server.
 	 * Never schedule Active FTPDATA connections from real server.
 	 * For LVS-NAT they must be already created. For other methods
 	 * with persistence the connection is created on SYN+ACK.
 	 */
-	if (pptr[0] == FTPDATA) {
-		IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
+	if (cport == FTPDATA) {
+		IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
 			      "Not scheduling FTPDATA");
 		return NULL;
 	}
@@ -444,19 +467,25 @@
 	/*
 	 *    Do not schedule replies from local real server.
 	 */
-	if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
-	    (cp = pp->conn_in_get(svc->af, skb, iph, 1))) {
-		IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
-			      "Not scheduling reply for existing connection");
-		__ip_vs_conn_put(cp);
-		return NULL;
+	if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK)) {
+		iph->hdr_flags ^= IP_VS_HDR_INVERSE;
+		cp = pp->conn_in_get(svc->ipvs, svc->af, skb, iph);
+		iph->hdr_flags ^= IP_VS_HDR_INVERSE;
+
+		if (cp) {
+			IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
+				      "Not scheduling reply for existing"
+				      " connection");
+			__ip_vs_conn_put(cp);
+			return NULL;
+		}
 	}
 
 	/*
 	 *    Persistent service
 	 */
 	if (svc->flags & IP_VS_SVC_F_PERSISTENT)
-		return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored,
+		return ip_vs_sched_persist(svc, skb, cport, vport, ignored,
 					   iph);
 
 	*ignored = 0;
@@ -464,7 +493,7 @@
 	/*
 	 *    Non-persistent service
 	 */
-	if (!svc->fwmark && pptr[1] != svc->port) {
+	if (!svc->fwmark && vport != svc->port) {
 		if (!svc->port)
 			pr_err("Schedule: port zero only supported "
 			       "in persistent services, "
@@ -495,11 +524,10 @@
 	{
 		struct ip_vs_conn_param p;
 
-		ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
-				      &iph->saddr, pptr[0], &iph->daddr,
-				      pptr[1], &p);
+		ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
+				      caddr, cport, vaddr, vport, &p);
 		cp = ip_vs_conn_new(&p, dest->af, &dest->addr,
-				    dest->port ? dest->port : pptr[1],
+				    dest->port ? dest->port : vport,
 				    flags, dest, skb->mark);
 		if (!cp) {
 			*ignored = -1;
@@ -519,6 +547,17 @@
 	return cp;
 }
 
+#ifdef CONFIG_SYSCTL
+static inline int ip_vs_addr_is_unicast(struct net *net, int af,
+					union nf_inet_addr *addr)
+{
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		return ipv6_addr_type(&addr->in6) & IPV6_ADDR_UNICAST;
+#endif
+	return (inet_addr_type(net, addr->ip) == RTN_UNICAST);
+}
+#endif
 
 /*
  *  Pass or drop the packet.
@@ -528,33 +567,21 @@
 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
 		struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
 {
-	__be16 _ports[2], *pptr;
-#ifdef CONFIG_SYSCTL
-	struct net *net;
-	struct netns_ipvs *ipvs;
-	int unicast;
-#endif
+	__be16 _ports[2], *pptr, dport;
+	struct netns_ipvs *ipvs = svc->ipvs;
+	struct net *net = ipvs->net;
 
 	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
-	if (pptr == NULL) {
+	if (!pptr)
 		return NF_DROP;
-	}
-
-#ifdef CONFIG_SYSCTL
-	net = skb_net(skb);
-
-#ifdef CONFIG_IP_VS_IPV6
-	if (svc->af == AF_INET6)
-		unicast = ipv6_addr_type(&iph->daddr.in6) & IPV6_ADDR_UNICAST;
-	else
-#endif
-		unicast = (inet_addr_type(net, iph->daddr.ip) == RTN_UNICAST);
+	dport = likely(!ip_vs_iph_inverse(iph)) ? pptr[1] : pptr[0];
 
 	/* if it is fwmark-based service, the cache_bypass sysctl is up
 	   and the destination is a non-local unicast, then create
 	   a cache_bypass connection entry */
-	ipvs = net_ipvs(net);
-	if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
+	if (sysctl_cache_bypass(ipvs) && svc->fwmark &&
+	    !(iph->hdr_flags & (IP_VS_HDR_INVERSE | IP_VS_HDR_ICMP)) &&
+	    ip_vs_addr_is_unicast(net, svc->af, &iph->daddr)) {
 		int ret;
 		struct ip_vs_conn *cp;
 		unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
@@ -566,7 +593,7 @@
 		IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
 		{
 			struct ip_vs_conn_param p;
-			ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
+			ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
 					      &iph->saddr, pptr[0],
 					      &iph->daddr, pptr[1], &p);
 			cp = ip_vs_conn_new(&p, svc->af, &daddr, 0,
@@ -590,7 +617,6 @@
 		ip_vs_conn_put(cp);
 		return ret;
 	}
-#endif
 
 	/*
 	 * When the virtual ftp service is presented, packets destined
@@ -598,9 +624,12 @@
 	 * listed in the ipvs table), pass the packets, because it is
 	 * not ipvs job to decide to drop the packets.
 	 */
-	if ((svc->port == FTPPORT) && (pptr[1] != FTPPORT))
+	if (svc->port == FTPPORT && dport != FTPPORT)
 		return NF_ACCEPT;
 
+	if (unlikely(ip_vs_iph_icmp(iph)))
+		return NF_DROP;
+
 	/*
 	 * Notify the client that the destination is unreachable, and
 	 * release the socket buffer.
@@ -610,11 +639,8 @@
 	 */
 #ifdef CONFIG_IP_VS_IPV6
 	if (svc->af == AF_INET6) {
-		if (!skb->dev) {
-			struct net *net_ = dev_net(skb_dst(skb)->dev);
-
-			skb->dev = net_->loopback_dev;
-		}
+		if (!skb->dev)
+			skb->dev = net->loopback_dev;
 		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 	} else
 #endif
@@ -625,15 +651,13 @@
 
 #ifdef CONFIG_SYSCTL
 
-static int sysctl_snat_reroute(struct sk_buff *skb)
+static int sysctl_snat_reroute(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
 	return ipvs->sysctl_snat_reroute;
 }
 
-static int sysctl_nat_icmp_send(struct net *net)
+static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	return ipvs->sysctl_nat_icmp_send;
 }
 
@@ -644,8 +668,8 @@
 
 #else
 
-static int sysctl_snat_reroute(struct sk_buff *skb) { return 0; }
-static int sysctl_nat_icmp_send(struct net *net) { return 0; }
+static int sysctl_snat_reroute(struct netns_ipvs *ipvs) { return 0; }
+static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) { return 0; }
 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; }
 
 #endif
@@ -664,12 +688,13 @@
 	return IP_DEFRAG_VS_OUT;
 }
 
-static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
+static inline int ip_vs_gather_frags(struct netns_ipvs *ipvs,
+				     struct sk_buff *skb, u_int32_t user)
 {
 	int err;
 
 	local_bh_disable();
-	err = ip_defrag(skb, user);
+	err = ip_defrag(ipvs->net, skb, user);
 	local_bh_enable();
 	if (!err)
 		ip_send_check(ip_hdr(skb));
@@ -677,10 +702,10 @@
 	return err;
 }
 
-static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
-				 unsigned int hooknum)
+static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af,
+				 struct sk_buff *skb, unsigned int hooknum)
 {
-	if (!sysctl_snat_reroute(skb))
+	if (!sysctl_snat_reroute(ipvs))
 		return 0;
 	/* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
 	if (NF_INET_LOCAL_IN == hooknum)
@@ -690,12 +715,12 @@
 		struct dst_entry *dst = skb_dst(skb);
 
 		if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
-		    ip6_route_me_harder(skb) != 0)
+		    ip6_route_me_harder(ipvs->net, skb) != 0)
 			return 1;
 	} else
 #endif
 		if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
-		    ip_route_me_harder(skb, RTN_LOCAL) != 0)
+		    ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0)
 			return 1;
 
 	return 0;
@@ -848,7 +873,7 @@
 #endif
 		ip_vs_nat_icmp(skb, pp, cp, 1);
 
-	if (ip_vs_route_me_harder(af, skb, hooknum))
+	if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
 		goto out;
 
 	/* do the statistics and put it back */
@@ -872,8 +897,8 @@
  *	Find any that might be relevant, check against existing connections.
  *	Currently handles error types - unreachable, quench, ttl exceeded.
  */
-static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
-			  unsigned int hooknum)
+static int ip_vs_out_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb,
+			  int *related, unsigned int hooknum)
 {
 	struct iphdr *iph;
 	struct icmphdr	_icmph, *ic;
@@ -888,7 +913,7 @@
 
 	/* reassemble IP fragments */
 	if (ip_is_fragment(ip_hdr(skb))) {
-		if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
+		if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
 			return NF_STOLEN;
 	}
 
@@ -934,10 +959,10 @@
 	IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
 		      "Checking outgoing ICMP for");
 
-	ip_vs_fill_ip4hdr(cih, &ciph);
-	ciph.len += offset;
+	ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, true, &ciph);
+
 	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_out_get(AF_INET, skb, &ciph, 1);
+	cp = pp->conn_out_get(ipvs, AF_INET, skb, &ciph);
 	if (!cp)
 		return NF_ACCEPT;
 
@@ -947,16 +972,16 @@
 }
 
 #ifdef CONFIG_IP_VS_IPV6
-static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
-			     unsigned int hooknum, struct ip_vs_iphdr *ipvsh)
+static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
+			     int *related,  unsigned int hooknum,
+			     struct ip_vs_iphdr *ipvsh)
 {
 	struct icmp6hdr	_icmph, *ic;
-	struct ipv6hdr _ip6h, *ip6h; /* The ip header contained within ICMP */
 	struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
 	struct ip_vs_conn *cp;
 	struct ip_vs_protocol *pp;
 	union nf_inet_addr snet;
-	unsigned int writable;
+	unsigned int offset;
 
 	*related = 1;
 	ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh);
@@ -984,31 +1009,23 @@
 		  ic->icmp6_type, ntohs(icmpv6_id(ic)),
 		  &ipvsh->saddr, &ipvsh->daddr);
 
-	/* Now find the contained IP header */
-	ciph.len = ipvsh->len + sizeof(_icmph);
-	ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
-	if (ip6h == NULL)
+	if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, ipvsh->len + sizeof(_icmph),
+				     true, &ciph))
 		return NF_ACCEPT; /* The packet looks wrong, ignore */
-	ciph.saddr.in6 = ip6h->saddr; /* conn_out_get() handles reverse order */
-	ciph.daddr.in6 = ip6h->daddr;
-	/* skip possible IPv6 exthdrs of contained IPv6 packet */
-	ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
-	if (ciph.protocol < 0)
-		return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
 
 	pp = ip_vs_proto_get(ciph.protocol);
 	if (!pp)
 		return NF_ACCEPT;
 
 	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_out_get(AF_INET6, skb, &ciph, 1);
+	cp = pp->conn_out_get(ipvs, AF_INET6, skb, &ciph);
 	if (!cp)
 		return NF_ACCEPT;
 
 	snet.in6 = ciph.saddr.in6;
-	writable = ciph.len;
+	offset = ciph.len;
 	return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
-				    pp, writable, sizeof(struct ipv6hdr),
+				    pp, offset, sizeof(struct ipv6hdr),
 				    hooknum);
 }
 #endif
@@ -1093,7 +1110,7 @@
 {
 	struct ip_vs_protocol *pp = pd->pp;
 
-	IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
+	IP_VS_DBG_PKT(11, af, pp, skb, iph->off, "Outgoing packet");
 
 	if (!skb_make_writable(skb, iph->len))
 		goto drop;
@@ -1127,10 +1144,10 @@
 	 * if it came from this machine itself.  So re-compute
 	 * the routing information.
 	 */
-	if (ip_vs_route_me_harder(af, skb, hooknum))
+	if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
 		goto drop;
 
-	IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
+	IP_VS_DBG_PKT(10, af, pp, skb, iph->off, "After SNAT");
 
 	ip_vs_out_stats(cp, skb);
 	ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
@@ -1155,9 +1172,9 @@
  *	Check if outgoing packet belongs to the established ip_vs_conn.
  */
 static unsigned int
-ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
+ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
 {
-	struct net *net = NULL;
+	struct net *net = ipvs->net;
 	struct ip_vs_iphdr iph;
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
@@ -1182,16 +1199,15 @@
 	if (unlikely(!skb_dst(skb)))
 		return NF_ACCEPT;
 
-	net = skb_net(skb);
-	if (!net_ipvs(net)->enable)
+	if (!ipvs->enable)
 		return NF_ACCEPT;
 
-	ip_vs_fill_iph_skb(af, skb, &iph);
+	ip_vs_fill_iph_skb(af, skb, false, &iph);
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6) {
 		if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
 			int related;
-			int verdict = ip_vs_out_icmp_v6(skb, &related,
+			int verdict = ip_vs_out_icmp_v6(ipvs, skb, &related,
 							hooknum, &iph);
 
 			if (related)
@@ -1201,13 +1217,13 @@
 #endif
 		if (unlikely(iph.protocol == IPPROTO_ICMP)) {
 			int related;
-			int verdict = ip_vs_out_icmp(skb, &related, hooknum);
+			int verdict = ip_vs_out_icmp(ipvs, skb, &related, hooknum);
 
 			if (related)
 				return verdict;
 		}
 
-	pd = ip_vs_proto_data_get(net, iph.protocol);
+	pd = ip_vs_proto_data_get(ipvs, iph.protocol);
 	if (unlikely(!pd))
 		return NF_ACCEPT;
 	pp = pd->pp;
@@ -1217,21 +1233,21 @@
 	if (af == AF_INET)
 #endif
 		if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
-			if (ip_vs_gather_frags(skb,
+			if (ip_vs_gather_frags(ipvs, skb,
 					       ip_vs_defrag_user(hooknum)))
 				return NF_STOLEN;
 
-			ip_vs_fill_ip4hdr(skb_network_header(skb), &iph);
+			ip_vs_fill_iph_skb(AF_INET, skb, false, &iph);
 		}
 
 	/*
 	 * Check if the packet belongs to an existing entry
 	 */
-	cp = pp->conn_out_get(af, skb, &iph, 0);
+	cp = pp->conn_out_get(ipvs, af, skb, &iph);
 
 	if (likely(cp))
 		return handle_response(af, skb, pd, cp, &iph, hooknum);
-	if (sysctl_nat_icmp_send(net) &&
+	if (sysctl_nat_icmp_send(ipvs) &&
 	    (pp->protocol == IPPROTO_TCP ||
 	     pp->protocol == IPPROTO_UDP ||
 	     pp->protocol == IPPROTO_SCTP)) {
@@ -1241,7 +1257,7 @@
 					 sizeof(_ports), _ports, &iph);
 		if (pptr == NULL)
 			return NF_ACCEPT;	/* Not for me */
-		if (ip_vs_has_real_service(net, af, iph.protocol, &iph.saddr,
+		if (ip_vs_has_real_service(ipvs, af, iph.protocol, &iph.saddr,
 					   pptr[0])) {
 			/*
 			 * Notify the real server: there is no
@@ -1272,7 +1288,7 @@
 			}
 		}
 	}
-	IP_VS_DBG_PKT(12, af, pp, skb, 0,
+	IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
 		      "ip_vs_out: packet continues traversal as normal");
 	return NF_ACCEPT;
 }
@@ -1283,10 +1299,10 @@
  *	Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip_vs_reply4(void *priv, struct sk_buff *skb,
 	     const struct nf_hook_state *state)
 {
-	return ip_vs_out(ops->hooknum, skb, AF_INET);
+	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
 }
 
 /*
@@ -1294,10 +1310,10 @@
  *	Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip_vs_local_reply4(void *priv, struct sk_buff *skb,
 		   const struct nf_hook_state *state)
 {
-	return ip_vs_out(ops->hooknum, skb, AF_INET);
+	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1308,10 +1324,10 @@
  *	Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip_vs_reply6(void *priv, struct sk_buff *skb,
 	     const struct nf_hook_state *state)
 {
-	return ip_vs_out(ops->hooknum, skb, AF_INET6);
+	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
 }
 
 /*
@@ -1319,14 +1335,51 @@
  *	Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip_vs_local_reply6(void *priv, struct sk_buff *skb,
 		   const struct nf_hook_state *state)
 {
-	return ip_vs_out(ops->hooknum, skb, AF_INET6);
+	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
 }
 
 #endif
 
+static unsigned int
+ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
+		      struct ip_vs_proto_data *pd,
+		      int *verdict, struct ip_vs_conn **cpp,
+		      struct ip_vs_iphdr *iph)
+{
+	struct ip_vs_protocol *pp = pd->pp;
+
+	if (!iph->fragoffs) {
+		/* No (second) fragments need to enter here, as nf_defrag_ipv6
+		 * replayed fragment zero will already have created the cp
+		 */
+
+		/* Schedule and create new connection entry into cpp */
+		if (!pp->conn_schedule(ipvs, af, skb, pd, verdict, cpp, iph))
+			return 0;
+	}
+
+	if (unlikely(!*cpp)) {
+		/* sorry, all this trouble for a no-hit :) */
+		IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
+			      "ip_vs_in: packet continues traversal as normal");
+		if (iph->fragoffs) {
+			/* Fragment that couldn't be mapped to a conn entry
+			 * is missing module nf_defrag_ipv6
+			 */
+			IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
+			IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
+				      "unhandled fragment");
+		}
+		*verdict = NF_ACCEPT;
+		return 0;
+	}
+
+	return 1;
+}
+
 /*
  *	Handle ICMP messages in the outside-to-inside direction (incoming).
  *	Find any that might be relevant, check against existing connections,
@@ -1334,9 +1387,9 @@
  *	Currently handles error types - unreachable, quench, ttl exceeded.
  */
 static int
-ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
+ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
+	      unsigned int hooknum)
 {
-	struct net *net = NULL;
 	struct iphdr *iph;
 	struct icmphdr	_icmph, *ic;
 	struct iphdr	_ciph, *cih;	/* The ip header contained within the ICMP */
@@ -1345,13 +1398,13 @@
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
 	unsigned int offset, offset2, ihl, verdict;
-	bool ipip;
+	bool ipip, new_cp = false;
 
 	*related = 1;
 
 	/* reassemble IP fragments */
 	if (ip_is_fragment(ip_hdr(skb))) {
-		if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
+		if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
 			return NF_STOLEN;
 	}
 
@@ -1385,8 +1438,6 @@
 	if (cih == NULL)
 		return NF_ACCEPT; /* The packet looks wrong, ignore */
 
-	net = skb_net(skb);
-
 	/* Special case for errors for IPIP packets */
 	ipip = false;
 	if (cih->protocol == IPPROTO_IPIP) {
@@ -1402,7 +1453,7 @@
 		ipip = true;
 	}
 
-	pd = ip_vs_proto_data_get(net, cih->protocol);
+	pd = ip_vs_proto_data_get(ipvs, cih->protocol);
 	if (!pd)
 		return NF_ACCEPT;
 	pp = pd->pp;
@@ -1416,15 +1467,24 @@
 		      "Checking incoming ICMP for");
 
 	offset2 = offset;
-	ip_vs_fill_ip4hdr(cih, &ciph);
-	ciph.len += offset;
+	ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, !ipip, &ciph);
 	offset = ciph.len;
+
 	/* The embedded headers contain source and dest in reverse order.
 	 * For IPIP this is error for request, not for reply.
 	 */
-	cp = pp->conn_in_get(AF_INET, skb, &ciph, ipip ? 0 : 1);
-	if (!cp)
-		return NF_ACCEPT;
+	cp = pp->conn_in_get(ipvs, AF_INET, skb, &ciph);
+
+	if (!cp) {
+		int v;
+
+		if (!sysctl_schedule_icmp(ipvs))
+			return NF_ACCEPT;
+
+		if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
+			return v;
+		new_cp = true;
+	}
 
 	verdict = NF_DROP;
 
@@ -1455,7 +1515,7 @@
 			skb_reset_network_header(skb);
 			IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
 				&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
-			ipv4_update_pmtu(skb, dev_net(skb->dev),
+			ipv4_update_pmtu(skb, ipvs->net,
 					 mtu, 0, 0, 0, 0);
 			/* Client uses PMTUD? */
 			if (!(frag_off & htons(IP_DF)))
@@ -1501,23 +1561,26 @@
 	verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
 
 out:
-	__ip_vs_conn_put(cp);
+	if (likely(!new_cp))
+		__ip_vs_conn_put(cp);
+	else
+		ip_vs_conn_put(cp);
 
 	return verdict;
 }
 
 #ifdef CONFIG_IP_VS_IPV6
-static int ip_vs_in_icmp_v6(struct sk_buff *skb, int *related,
-			    unsigned int hooknum, struct ip_vs_iphdr *iph)
+static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
+			    int *related, unsigned int hooknum,
+			    struct ip_vs_iphdr *iph)
 {
-	struct net *net = NULL;
-	struct ipv6hdr _ip6h, *ip6h;
 	struct icmp6hdr	_icmph, *ic;
 	struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
 	struct ip_vs_conn *cp;
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
-	unsigned int offs_ciph, writable, verdict;
+	unsigned int offset, verdict;
+	bool new_cp = false;
 
 	*related = 1;
 
@@ -1546,21 +1609,11 @@
 		  ic->icmp6_type, ntohs(icmpv6_id(ic)),
 		  &iph->saddr, &iph->daddr);
 
-	/* Now find the contained IP header */
-	ciph.len = iph->len + sizeof(_icmph);
-	offs_ciph = ciph.len; /* Save ip header offset */
-	ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
-	if (ip6h == NULL)
-		return NF_ACCEPT; /* The packet looks wrong, ignore */
-	ciph.saddr.in6 = ip6h->saddr; /* conn_in_get() handles reverse order */
-	ciph.daddr.in6 = ip6h->daddr;
-	/* skip possible IPv6 exthdrs of contained IPv6 packet */
-	ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
-	if (ciph.protocol < 0)
-		return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
+	offset = iph->len + sizeof(_icmph);
+	if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, offset, true, &ciph))
+		return NF_ACCEPT;
 
-	net = skb_net(skb);
-	pd = ip_vs_proto_data_get(net, ciph.protocol);
+	pd = ip_vs_proto_data_get(ipvs, ciph.protocol);
 	if (!pd)
 		return NF_ACCEPT;
 	pp = pd->pp;
@@ -1569,36 +1622,49 @@
 	if (ciph.fragoffs)
 		return NF_ACCEPT;
 
-	IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offs_ciph,
+	IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
 		      "Checking incoming ICMPv6 for");
 
 	/* The embedded headers contain source and dest in reverse order
 	 * if not from localhost
 	 */
-	cp = pp->conn_in_get(AF_INET6, skb, &ciph,
-			     (hooknum == NF_INET_LOCAL_OUT) ? 0 : 1);
+	cp = pp->conn_in_get(ipvs, AF_INET6, skb, &ciph);
 
-	if (!cp)
-		return NF_ACCEPT;
+	if (!cp) {
+		int v;
+
+		if (!sysctl_schedule_icmp(ipvs))
+			return NF_ACCEPT;
+
+		if (!ip_vs_try_to_schedule(ipvs, AF_INET6, skb, pd, &v, &cp, &ciph))
+			return v;
+
+		new_cp = true;
+	}
+
 	/* VS/TUN, VS/DR and LOCALNODE just let it go */
 	if ((hooknum == NF_INET_LOCAL_OUT) &&
 	    (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) {
-		__ip_vs_conn_put(cp);
-		return NF_ACCEPT;
+		verdict = NF_ACCEPT;
+		goto out;
 	}
 
 	/* do the statistics and put it back */
 	ip_vs_in_stats(cp, skb);
 
 	/* Need to mangle contained IPv6 header in ICMPv6 packet */
-	writable = ciph.len;
+	offset = ciph.len;
 	if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol ||
 	    IPPROTO_SCTP == ciph.protocol)
-		writable += 2 * sizeof(__u16); /* Also mangle ports */
+		offset += 2 * sizeof(__u16); /* Also mangle ports */
 
-	verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, writable, hooknum, &ciph);
+	verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum, &ciph);
 
-	__ip_vs_conn_put(cp);
+out:
+	if (likely(!new_cp))
+		__ip_vs_conn_put(cp);
+	else
+		ip_vs_conn_put(cp);
 
 	return verdict;
 }
@@ -1610,15 +1676,13 @@
  *	and send it on its way...
  */
 static unsigned int
-ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
+ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
 {
-	struct net *net;
 	struct ip_vs_iphdr iph;
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
 	struct ip_vs_conn *cp;
 	int ret, pkts;
-	struct netns_ipvs *ipvs;
 	int conn_reuse_mode;
 
 	/* Already marked as IPVS request or reply? */
@@ -1633,7 +1697,7 @@
 	if (unlikely((skb->pkt_type != PACKET_HOST &&
 		      hooknum != NF_INET_LOCAL_OUT) ||
 		     !skb_dst(skb))) {
-		ip_vs_fill_iph_skb(af, skb, &iph);
+		ip_vs_fill_iph_skb(af, skb, false, &iph);
 		IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
 			      " ignored in hook %u\n",
 			      skb->pkt_type, iph.protocol,
@@ -1641,12 +1705,10 @@
 		return NF_ACCEPT;
 	}
 	/* ipvs enabled in this netns ? */
-	net = skb_net(skb);
-	ipvs = net_ipvs(net);
 	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;
 
-	ip_vs_fill_iph_skb(af, skb, &iph);
+	ip_vs_fill_iph_skb(af, skb, false, &iph);
 
 	/* Bad... Do not break raw sockets */
 	if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
@@ -1662,8 +1724,8 @@
 	if (af == AF_INET6) {
 		if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
 			int related;
-			int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum,
-						       &iph);
+			int verdict = ip_vs_in_icmp_v6(ipvs, skb, &related,
+						       hooknum, &iph);
 
 			if (related)
 				return verdict;
@@ -1672,21 +1734,30 @@
 #endif
 		if (unlikely(iph.protocol == IPPROTO_ICMP)) {
 			int related;
-			int verdict = ip_vs_in_icmp(skb, &related, hooknum);
+			int verdict = ip_vs_in_icmp(ipvs, skb, &related,
+						    hooknum);
 
 			if (related)
 				return verdict;
 		}
 
 	/* Protocol supported? */
-	pd = ip_vs_proto_data_get(net, iph.protocol);
-	if (unlikely(!pd))
+	pd = ip_vs_proto_data_get(ipvs, iph.protocol);
+	if (unlikely(!pd)) {
+		/* The only way we'll see this packet again is if it's
+		 * encapsulated, so mark it with ipvs_property=1 so we
+		 * skip it if we're ignoring tunneled packets
+		 */
+		if (sysctl_ignore_tunneled(ipvs))
+			skb->ipvs_property = 1;
+
 		return NF_ACCEPT;
+	}
 	pp = pd->pp;
 	/*
 	 * Check if the packet belongs to an existing connection entry
 	 */
-	cp = pp->conn_in_get(af, skb, &iph, 0);
+	cp = pp->conn_in_get(ipvs, af, skb, &iph);
 
 	conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
 	if (conn_reuse_mode && !iph.fragoffs &&
@@ -1700,32 +1771,15 @@
 		cp = NULL;
 	}
 
-	if (unlikely(!cp) && !iph.fragoffs) {
-		/* No (second) fragments need to enter here, as nf_defrag_ipv6
-		 * replayed fragment zero will already have created the cp
-		 */
+	if (unlikely(!cp)) {
 		int v;
 
-		/* Schedule and create new connection entry into &cp */
-		if (!pp->conn_schedule(af, skb, pd, &v, &cp, &iph))
+		if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph))
 			return v;
 	}
 
-	if (unlikely(!cp)) {
-		/* sorry, all this trouble for a no-hit :) */
-		IP_VS_DBG_PKT(12, af, pp, skb, 0,
-			      "ip_vs_in: packet continues traversal as normal");
-		if (iph.fragoffs) {
-			/* Fragment that couldn't be mapped to a conn entry
-			 * is missing module nf_defrag_ipv6
-			 */
-			IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
-			IP_VS_DBG_PKT(7, af, pp, skb, 0, "unhandled fragment");
-		}
-		return NF_ACCEPT;
-	}
+	IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet");
 
-	IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
 	/* Check the server status */
 	if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
 		/* the destination server is not available */
@@ -1765,7 +1819,7 @@
 		pkts = atomic_add_return(1, &cp->in_pkts);
 
 	if (ipvs->sync_state & IP_VS_STATE_MASTER)
-		ip_vs_sync_conn(net, cp, pkts);
+		ip_vs_sync_conn(ipvs, cp, pkts);
 
 	ip_vs_conn_put(cp);
 	return ret;
@@ -1776,10 +1830,10 @@
  *	Schedule and forward packets from remote clients
  */
 static unsigned int
-ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip_vs_remote_request4(void *priv, struct sk_buff *skb,
 		      const struct nf_hook_state *state)
 {
-	return ip_vs_in(ops->hooknum, skb, AF_INET);
+	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
 }
 
 /*
@@ -1787,10 +1841,10 @@
  *	Schedule and forward packets from local clients
  */
 static unsigned int
-ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip_vs_local_request4(void *priv, struct sk_buff *skb,
 		     const struct nf_hook_state *state)
 {
-	return ip_vs_in(ops->hooknum, skb, AF_INET);
+	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1800,10 +1854,10 @@
  *	Schedule and forward packets from remote clients
  */
 static unsigned int
-ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip_vs_remote_request6(void *priv, struct sk_buff *skb,
 		      const struct nf_hook_state *state)
 {
-	return ip_vs_in(ops->hooknum, skb, AF_INET6);
+	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
 }
 
 /*
@@ -1811,10 +1865,10 @@
  *	Schedule and forward packets from local clients
  */
 static unsigned int
-ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip_vs_local_request6(void *priv, struct sk_buff *skb,
 		     const struct nf_hook_state *state)
 {
-	return ip_vs_in(ops->hooknum, skb, AF_INET6);
+	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
 }
 
 #endif
@@ -1830,46 +1884,40 @@
  *      and send them to ip_vs_in_icmp.
  */
 static unsigned int
-ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip_vs_forward_icmp(void *priv, struct sk_buff *skb,
 		   const struct nf_hook_state *state)
 {
 	int r;
-	struct net *net;
-	struct netns_ipvs *ipvs;
+	struct netns_ipvs *ipvs = net_ipvs(state->net);
 
 	if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
 		return NF_ACCEPT;
 
 	/* ipvs enabled in this netns ? */
-	net = skb_net(skb);
-	ipvs = net_ipvs(net);
 	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;
 
-	return ip_vs_in_icmp(skb, &r, ops->hooknum);
+	return ip_vs_in_icmp(ipvs, skb, &r, state->hook);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
 static unsigned int
-ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ip_vs_forward_icmp_v6(void *priv, struct sk_buff *skb,
 		      const struct nf_hook_state *state)
 {
 	int r;
-	struct net *net;
-	struct netns_ipvs *ipvs;
+	struct netns_ipvs *ipvs = net_ipvs(state->net);
 	struct ip_vs_iphdr iphdr;
 
-	ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr);
+	ip_vs_fill_iph_skb(AF_INET6, skb, false, &iphdr);
 	if (iphdr.protocol != IPPROTO_ICMPV6)
 		return NF_ACCEPT;
 
 	/* ipvs enabled in this netns ? */
-	net = skb_net(skb);
-	ipvs = net_ipvs(net);
 	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;
 
-	return ip_vs_in_icmp_v6(skb, &r, ops->hooknum, &iphdr);
+	return ip_vs_in_icmp_v6(ipvs, skb, &r, state->hook, &iphdr);
 }
 #endif
 
@@ -1999,22 +2047,22 @@
 	atomic_inc(&ipvs_netns_cnt);
 	net->ipvs = ipvs;
 
-	if (ip_vs_estimator_net_init(net) < 0)
+	if (ip_vs_estimator_net_init(ipvs) < 0)
 		goto estimator_fail;
 
-	if (ip_vs_control_net_init(net) < 0)
+	if (ip_vs_control_net_init(ipvs) < 0)
 		goto control_fail;
 
-	if (ip_vs_protocol_net_init(net) < 0)
+	if (ip_vs_protocol_net_init(ipvs) < 0)
 		goto protocol_fail;
 
-	if (ip_vs_app_net_init(net) < 0)
+	if (ip_vs_app_net_init(ipvs) < 0)
 		goto app_fail;
 
-	if (ip_vs_conn_net_init(net) < 0)
+	if (ip_vs_conn_net_init(ipvs) < 0)
 		goto conn_fail;
 
-	if (ip_vs_sync_net_init(net) < 0)
+	if (ip_vs_sync_net_init(ipvs) < 0)
 		goto sync_fail;
 
 	printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
@@ -2025,15 +2073,15 @@
  */
 
 sync_fail:
-	ip_vs_conn_net_cleanup(net);
+	ip_vs_conn_net_cleanup(ipvs);
 conn_fail:
-	ip_vs_app_net_cleanup(net);
+	ip_vs_app_net_cleanup(ipvs);
 app_fail:
-	ip_vs_protocol_net_cleanup(net);
+	ip_vs_protocol_net_cleanup(ipvs);
 protocol_fail:
-	ip_vs_control_net_cleanup(net);
+	ip_vs_control_net_cleanup(ipvs);
 control_fail:
-	ip_vs_estimator_net_cleanup(net);
+	ip_vs_estimator_net_cleanup(ipvs);
 estimator_fail:
 	net->ipvs = NULL;
 	return -ENOMEM;
@@ -2041,22 +2089,25 @@
 
 static void __net_exit __ip_vs_cleanup(struct net *net)
 {
-	ip_vs_service_net_cleanup(net);	/* ip_vs_flush() with locks */
-	ip_vs_conn_net_cleanup(net);
-	ip_vs_app_net_cleanup(net);
-	ip_vs_protocol_net_cleanup(net);
-	ip_vs_control_net_cleanup(net);
-	ip_vs_estimator_net_cleanup(net);
-	IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	ip_vs_service_net_cleanup(ipvs);	/* ip_vs_flush() with locks */
+	ip_vs_conn_net_cleanup(ipvs);
+	ip_vs_app_net_cleanup(ipvs);
+	ip_vs_protocol_net_cleanup(ipvs);
+	ip_vs_control_net_cleanup(ipvs);
+	ip_vs_estimator_net_cleanup(ipvs);
+	IP_VS_DBG(2, "ipvs netns %d released\n", ipvs->gen);
 	net->ipvs = NULL;
 }
 
 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	EnterFunction(2);
-	net_ipvs(net)->enable = 0;	/* Disable packet reception */
+	ipvs->enable = 0;	/* Disable packet reception */
 	smp_wmb();
-	ip_vs_sync_net_cleanup(net);
+	ip_vs_sync_net_cleanup(ipvs);
 	LeaveFunction(2);
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 1a23e91..e7c1b05 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -228,7 +228,7 @@
 
 	update_defense_level(ipvs);
 	if (atomic_read(&ipvs->dropentry))
-		ip_vs_random_dropentry(ipvs->net);
+		ip_vs_random_dropentry(ipvs);
 	schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
 }
 #endif
@@ -263,7 +263,7 @@
  *	Returns hash value for virtual service
  */
 static inline unsigned int
-ip_vs_svc_hashkey(struct net *net, int af, unsigned int proto,
+ip_vs_svc_hashkey(struct netns_ipvs *ipvs, int af, unsigned int proto,
 		  const union nf_inet_addr *addr, __be16 port)
 {
 	register unsigned int porth = ntohs(port);
@@ -276,7 +276,7 @@
 			    addr->ip6[2]^addr->ip6[3];
 #endif
 	ahash = ntohl(addr_fold);
-	ahash ^= ((size_t) net >> 8);
+	ahash ^= ((size_t) ipvs >> 8);
 
 	return (proto ^ ahash ^ (porth >> IP_VS_SVC_TAB_BITS) ^ porth) &
 	       IP_VS_SVC_TAB_MASK;
@@ -285,9 +285,9 @@
 /*
  *	Returns hash value of fwmark for virtual service lookup
  */
-static inline unsigned int ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
+static inline unsigned int ip_vs_svc_fwm_hashkey(struct netns_ipvs *ipvs, __u32 fwmark)
 {
-	return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
+	return (((size_t)ipvs>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
 }
 
 /*
@@ -309,14 +309,14 @@
 		/*
 		 *  Hash it by <netns,protocol,addr,port> in ip_vs_svc_table
 		 */
-		hash = ip_vs_svc_hashkey(svc->net, svc->af, svc->protocol,
+		hash = ip_vs_svc_hashkey(svc->ipvs, svc->af, svc->protocol,
 					 &svc->addr, svc->port);
 		hlist_add_head_rcu(&svc->s_list, &ip_vs_svc_table[hash]);
 	} else {
 		/*
 		 *  Hash it by fwmark in svc_fwm_table
 		 */
-		hash = ip_vs_svc_fwm_hashkey(svc->net, svc->fwmark);
+		hash = ip_vs_svc_fwm_hashkey(svc->ipvs, svc->fwmark);
 		hlist_add_head_rcu(&svc->f_list, &ip_vs_svc_fwm_table[hash]);
 	}
 
@@ -357,21 +357,21 @@
  *	Get service by {netns, proto,addr,port} in the service table.
  */
 static inline struct ip_vs_service *
-__ip_vs_service_find(struct net *net, int af, __u16 protocol,
+__ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u16 protocol,
 		     const union nf_inet_addr *vaddr, __be16 vport)
 {
 	unsigned int hash;
 	struct ip_vs_service *svc;
 
 	/* Check for "full" addressed entries */
-	hash = ip_vs_svc_hashkey(net, af, protocol, vaddr, vport);
+	hash = ip_vs_svc_hashkey(ipvs, af, protocol, vaddr, vport);
 
 	hlist_for_each_entry_rcu(svc, &ip_vs_svc_table[hash], s_list) {
 		if ((svc->af == af)
 		    && ip_vs_addr_equal(af, &svc->addr, vaddr)
 		    && (svc->port == vport)
 		    && (svc->protocol == protocol)
-		    && net_eq(svc->net, net)) {
+		    && (svc->ipvs == ipvs)) {
 			/* HIT */
 			return svc;
 		}
@@ -385,17 +385,17 @@
  *	Get service by {fwmark} in the service table.
  */
 static inline struct ip_vs_service *
-__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
+__ip_vs_svc_fwm_find(struct netns_ipvs *ipvs, int af, __u32 fwmark)
 {
 	unsigned int hash;
 	struct ip_vs_service *svc;
 
 	/* Check for fwmark addressed entries */
-	hash = ip_vs_svc_fwm_hashkey(net, fwmark);
+	hash = ip_vs_svc_fwm_hashkey(ipvs, fwmark);
 
 	hlist_for_each_entry_rcu(svc, &ip_vs_svc_fwm_table[hash], f_list) {
 		if (svc->fwmark == fwmark && svc->af == af
-		    && net_eq(svc->net, net)) {
+		    && (svc->ipvs == ipvs)) {
 			/* HIT */
 			return svc;
 		}
@@ -406,17 +406,16 @@
 
 /* Find service, called under RCU lock */
 struct ip_vs_service *
-ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol,
 		   const union nf_inet_addr *vaddr, __be16 vport)
 {
 	struct ip_vs_service *svc;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	/*
 	 *	Check the table hashed by fwmark first
 	 */
 	if (fwmark) {
-		svc = __ip_vs_svc_fwm_find(net, af, fwmark);
+		svc = __ip_vs_svc_fwm_find(ipvs, af, fwmark);
 		if (svc)
 			goto out;
 	}
@@ -425,7 +424,7 @@
 	 *	Check the table hashed by <protocol,addr,port>
 	 *	for "full" addressed entries
 	 */
-	svc = __ip_vs_service_find(net, af, protocol, vaddr, vport);
+	svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, vport);
 
 	if (svc == NULL
 	    && protocol == IPPROTO_TCP
@@ -435,7 +434,7 @@
 		 * Check if ftp service entry exists, the packet
 		 * might belong to FTP data connections.
 		 */
-		svc = __ip_vs_service_find(net, af, protocol, vaddr, FTPPORT);
+		svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, FTPPORT);
 	}
 
 	if (svc == NULL
@@ -443,7 +442,7 @@
 		/*
 		 * Check if the catch-all port (port zero) exists
 		 */
-		svc = __ip_vs_service_find(net, af, protocol, vaddr, 0);
+		svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, 0);
 	}
 
   out:
@@ -543,10 +542,9 @@
 }
 
 /* Check if real service by <proto,addr,port> is present */
-bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
+bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
 			    const union nf_inet_addr *daddr, __be16 dport)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	unsigned int hash;
 	struct ip_vs_dest *dest;
 
@@ -601,7 +599,7 @@
  * on the backup.
  * Called under RCU lock, no refcnt is returned.
  */
-struct ip_vs_dest *ip_vs_find_dest(struct net  *net, int svc_af, int dest_af,
+struct ip_vs_dest *ip_vs_find_dest(struct netns_ipvs *ipvs, int svc_af, int dest_af,
 				   const union nf_inet_addr *daddr,
 				   __be16 dport,
 				   const union nf_inet_addr *vaddr,
@@ -612,7 +610,7 @@
 	struct ip_vs_service *svc;
 	__be16 port = dport;
 
-	svc = ip_vs_service_find(net, svc_af, fwmark, protocol, vaddr, vport);
+	svc = ip_vs_service_find(ipvs, svc_af, fwmark, protocol, vaddr, vport);
 	if (!svc)
 		return NULL;
 	if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ)
@@ -660,7 +658,7 @@
 		     const union nf_inet_addr *daddr, __be16 dport)
 {
 	struct ip_vs_dest *dest;
-	struct netns_ipvs *ipvs = net_ipvs(svc->net);
+	struct netns_ipvs *ipvs = svc->ipvs;
 
 	/*
 	 * Find the destination in trash
@@ -715,10 +713,9 @@
  *  are expired, and the refcnt of each destination in the trash must
  *  be 0, so we simply release them here.
  */
-static void ip_vs_trash_cleanup(struct net *net)
+static void ip_vs_trash_cleanup(struct netns_ipvs *ipvs)
 {
 	struct ip_vs_dest *dest, *nxt;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	del_timer_sync(&ipvs->dest_trash_timer);
 	/* No need to use dest_trash_lock */
@@ -788,7 +785,7 @@
 __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
 		    struct ip_vs_dest_user_kern *udest, int add)
 {
-	struct netns_ipvs *ipvs = net_ipvs(svc->net);
+	struct netns_ipvs *ipvs = svc->ipvs;
 	struct ip_vs_service *old_svc;
 	struct ip_vs_scheduler *sched;
 	int conn_flags;
@@ -843,7 +840,7 @@
 	spin_unlock_bh(&dest->dst_lock);
 
 	if (add) {
-		ip_vs_start_estimator(svc->net, &dest->stats);
+		ip_vs_start_estimator(svc->ipvs, &dest->stats);
 		list_add_rcu(&dest->n_list, &svc->destinations);
 		svc->num_dests++;
 		sched = rcu_dereference_protected(svc->scheduler, 1);
@@ -874,12 +871,12 @@
 		atype = ipv6_addr_type(&udest->addr.in6);
 		if ((!(atype & IPV6_ADDR_UNICAST) ||
 			atype & IPV6_ADDR_LINKLOCAL) &&
-			!__ip_vs_addr_is_local_v6(svc->net, &udest->addr.in6))
+			!__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6))
 			return -EINVAL;
 	} else
 #endif
 	{
-		atype = inet_addr_type(svc->net, udest->addr.ip);
+		atype = inet_addr_type(svc->ipvs->net, udest->addr.ip);
 		if (atype != RTN_LOCAL && atype != RTN_UNICAST)
 			return -EINVAL;
 	}
@@ -1036,12 +1033,10 @@
 /*
  *	Delete a destination (must be already unlinked from the service)
  */
-static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest,
+static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest,
 			     bool cleanup)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
-	ip_vs_stop_estimator(net, &dest->stats);
+	ip_vs_stop_estimator(ipvs, &dest->stats);
 
 	/*
 	 *  Remove it from the d-linked list with the real services.
@@ -1079,7 +1074,7 @@
 	svc->num_dests--;
 
 	if (dest->af != svc->af)
-		net_ipvs(svc->net)->mixed_address_family_dests--;
+		svc->ipvs->mixed_address_family_dests--;
 
 	if (svcupd) {
 		struct ip_vs_scheduler *sched;
@@ -1120,7 +1115,7 @@
 	/*
 	 *	Delete the destination
 	 */
-	__ip_vs_del_dest(svc->net, dest, false);
+	__ip_vs_del_dest(svc->ipvs, dest, false);
 
 	LeaveFunction(2);
 
@@ -1129,8 +1124,7 @@
 
 static void ip_vs_dest_trash_expire(unsigned long data)
 {
-	struct net *net = (struct net *) data;
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct netns_ipvs *ipvs = (struct netns_ipvs *)data;
 	struct ip_vs_dest *dest, *next;
 	unsigned long now = jiffies;
 
@@ -1163,14 +1157,13 @@
  *	Add a service into the service hash table
  */
 static int
-ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
+ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
 		  struct ip_vs_service **svc_p)
 {
 	int ret = 0, i;
 	struct ip_vs_scheduler *sched = NULL;
 	struct ip_vs_pe *pe = NULL;
 	struct ip_vs_service *svc = NULL;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	/* increase the module use count */
 	ip_vs_use_count_inc();
@@ -1237,7 +1230,7 @@
 	svc->flags = u->flags;
 	svc->timeout = u->timeout * HZ;
 	svc->netmask = u->netmask;
-	svc->net = net;
+	svc->ipvs = ipvs;
 
 	INIT_LIST_HEAD(&svc->destinations);
 	spin_lock_init(&svc->sched_lock);
@@ -1261,7 +1254,7 @@
 	else if (svc->port == 0)
 		atomic_inc(&ipvs->nullsvc_counter);
 
-	ip_vs_start_estimator(net, &svc->stats);
+	ip_vs_start_estimator(ipvs, &svc->stats);
 
 	/* Count only IPv4 services for old get/setsockopt interface */
 	if (svc->af == AF_INET)
@@ -1381,7 +1374,7 @@
 	struct ip_vs_dest *dest, *nxt;
 	struct ip_vs_scheduler *old_sched;
 	struct ip_vs_pe *old_pe;
-	struct netns_ipvs *ipvs = net_ipvs(svc->net);
+	struct netns_ipvs *ipvs = svc->ipvs;
 
 	pr_info("%s: enter\n", __func__);
 
@@ -1389,7 +1382,7 @@
 	if (svc->af == AF_INET)
 		ipvs->num_services--;
 
-	ip_vs_stop_estimator(svc->net, &svc->stats);
+	ip_vs_stop_estimator(svc->ipvs, &svc->stats);
 
 	/* Unbind scheduler */
 	old_sched = rcu_dereference_protected(svc->scheduler, 1);
@@ -1405,7 +1398,7 @@
 	 */
 	list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
 		__ip_vs_unlink_dest(svc, dest, 0);
-		__ip_vs_del_dest(svc->net, dest, cleanup);
+		__ip_vs_del_dest(svc->ipvs, dest, cleanup);
 	}
 
 	/*
@@ -1456,7 +1449,7 @@
 /*
  *	Flush all the virtual services
  */
-static int ip_vs_flush(struct net *net, bool cleanup)
+static int ip_vs_flush(struct netns_ipvs *ipvs, bool cleanup)
 {
 	int idx;
 	struct ip_vs_service *svc;
@@ -1468,7 +1461,7 @@
 	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry_safe(svc, n, &ip_vs_svc_table[idx],
 					  s_list) {
-			if (net_eq(svc->net, net))
+			if (svc->ipvs == ipvs)
 				ip_vs_unlink_service(svc, cleanup);
 		}
 	}
@@ -1479,7 +1472,7 @@
 	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry_safe(svc, n, &ip_vs_svc_fwm_table[idx],
 					  f_list) {
-			if (net_eq(svc->net, net))
+			if (svc->ipvs == ipvs)
 				ip_vs_unlink_service(svc, cleanup);
 		}
 	}
@@ -1491,12 +1484,12 @@
  *	Delete service by {netns} in the service table.
  *	Called by __ip_vs_cleanup()
  */
-void ip_vs_service_net_cleanup(struct net *net)
+void ip_vs_service_net_cleanup(struct netns_ipvs *ipvs)
 {
 	EnterFunction(2);
 	/* Check for "full" addressed entries */
 	mutex_lock(&__ip_vs_mutex);
-	ip_vs_flush(net, true);
+	ip_vs_flush(ipvs, true);
 	mutex_unlock(&__ip_vs_mutex);
 	LeaveFunction(2);
 }
@@ -1540,7 +1533,7 @@
 	mutex_lock(&__ip_vs_mutex);
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
-			if (net_eq(svc->net, net)) {
+			if (svc->ipvs == ipvs) {
 				list_for_each_entry(dest, &svc->destinations,
 						    n_list) {
 					ip_vs_forget_dev(dest, dev);
@@ -1549,7 +1542,7 @@
 		}
 
 		hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
-			if (net_eq(svc->net, net)) {
+			if (svc->ipvs == ipvs) {
 				list_for_each_entry(dest, &svc->destinations,
 						    n_list) {
 					ip_vs_forget_dev(dest, dev);
@@ -1583,26 +1576,26 @@
 	return 0;
 }
 
-static int ip_vs_zero_all(struct net *net)
+static int ip_vs_zero_all(struct netns_ipvs *ipvs)
 {
 	int idx;
 	struct ip_vs_service *svc;
 
 	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
-			if (net_eq(svc->net, net))
+			if (svc->ipvs == ipvs)
 				ip_vs_zero_service(svc);
 		}
 	}
 
 	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
-			if (net_eq(svc->net, net))
+			if (svc->ipvs == ipvs)
 				ip_vs_zero_service(svc);
 		}
 	}
 
-	ip_vs_zero_stats(&net_ipvs(net)->tot_stats);
+	ip_vs_zero_stats(&ipvs->tot_stats);
 	return 0;
 }
 
@@ -1615,7 +1608,7 @@
 proc_do_defense_mode(struct ctl_table *table, int write,
 		     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct net *net = current->nsproxy->net_ns;
+	struct netns_ipvs *ipvs = table->extra2;
 	int *valp = table->data;
 	int val = *valp;
 	int rc;
@@ -1626,7 +1619,7 @@
 			/* Restore the correct value */
 			*valp = val;
 		} else {
-			update_defense_level(net_ipvs(net));
+			update_defense_level(ipvs);
 		}
 	}
 	return rc;
@@ -1844,6 +1837,18 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "schedule_icmp",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "ignore_tunneled",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 #ifdef CONFIG_IP_VS_DEBUG
 	{
 		.procname	= "debug_level",
@@ -1889,6 +1894,7 @@
 static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
 {
 	struct net *net = seq_file_net(seq);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_iter *iter = seq->private;
 	int idx;
 	struct ip_vs_service *svc;
@@ -1896,7 +1902,7 @@
 	/* look in hash by protocol */
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry_rcu(svc, &ip_vs_svc_table[idx], s_list) {
-			if (net_eq(svc->net, net) && pos-- == 0) {
+			if ((svc->ipvs == ipvs) && pos-- == 0) {
 				iter->table = ip_vs_svc_table;
 				iter->bucket = idx;
 				return svc;
@@ -1908,7 +1914,7 @@
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry_rcu(svc, &ip_vs_svc_fwm_table[idx],
 					 f_list) {
-			if (net_eq(svc->net, net) && pos-- == 0) {
+			if ((svc->ipvs == ipvs) && pos-- == 0) {
 				iter->table = ip_vs_svc_fwm_table;
 				iter->bucket = idx;
 				return svc;
@@ -2196,7 +2202,7 @@
 /*
  *	Set timeout values for tcp tcpfin udp in the timeout_table.
  */
-static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u)
+static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user *u)
 {
 #if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
 	struct ip_vs_proto_data *pd;
@@ -2209,13 +2215,13 @@
 
 #ifdef CONFIG_IP_VS_PROTO_TCP
 	if (u->tcp_timeout) {
-		pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+		pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
 		pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
 			= u->tcp_timeout * HZ;
 	}
 
 	if (u->tcp_fin_timeout) {
-		pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+		pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
 		pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]
 			= u->tcp_fin_timeout * HZ;
 	}
@@ -2223,7 +2229,7 @@
 
 #ifdef CONFIG_IP_VS_PROTO_UDP
 	if (u->udp_timeout) {
-		pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+		pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
 		pd->timeout_table[IP_VS_UDP_S_NORMAL]
 			= u->udp_timeout * HZ;
 	}
@@ -2344,12 +2350,12 @@
 			cfg.syncid = dm->syncid;
 			rtnl_lock();
 			mutex_lock(&ipvs->sync_mutex);
-			ret = start_sync_thread(net, &cfg, dm->state);
+			ret = start_sync_thread(ipvs, &cfg, dm->state);
 			mutex_unlock(&ipvs->sync_mutex);
 			rtnl_unlock();
 		} else {
 			mutex_lock(&ipvs->sync_mutex);
-			ret = stop_sync_thread(net, dm->state);
+			ret = stop_sync_thread(ipvs, dm->state);
 			mutex_unlock(&ipvs->sync_mutex);
 		}
 		goto out_dec;
@@ -2358,11 +2364,11 @@
 	mutex_lock(&__ip_vs_mutex);
 	if (cmd == IP_VS_SO_SET_FLUSH) {
 		/* Flush the virtual service */
-		ret = ip_vs_flush(net, false);
+		ret = ip_vs_flush(ipvs, false);
 		goto out_unlock;
 	} else if (cmd == IP_VS_SO_SET_TIMEOUT) {
 		/* Set timeout values for (tcp tcpfin udp) */
-		ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
+		ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg);
 		goto out_unlock;
 	}
 
@@ -2377,7 +2383,7 @@
 	if (cmd == IP_VS_SO_SET_ZERO) {
 		/* if no service address is set, zero counters in all */
 		if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
-			ret = ip_vs_zero_all(net);
+			ret = ip_vs_zero_all(ipvs);
 			goto out_unlock;
 		}
 	}
@@ -2395,10 +2401,10 @@
 	/* Lookup the exact service by <protocol, addr, port> or fwmark */
 	rcu_read_lock();
 	if (usvc.fwmark == 0)
-		svc = __ip_vs_service_find(net, usvc.af, usvc.protocol,
+		svc = __ip_vs_service_find(ipvs, usvc.af, usvc.protocol,
 					   &usvc.addr, usvc.port);
 	else
-		svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark);
+		svc = __ip_vs_svc_fwm_find(ipvs, usvc.af, usvc.fwmark);
 	rcu_read_unlock();
 
 	if (cmd != IP_VS_SO_SET_ADD
@@ -2412,7 +2418,7 @@
 		if (svc != NULL)
 			ret = -EEXIST;
 		else
-			ret = ip_vs_add_service(net, &usvc, &svc);
+			ret = ip_vs_add_service(ipvs, &usvc, &svc);
 		break;
 	case IP_VS_SO_SET_EDIT:
 		ret = ip_vs_edit_service(svc, &usvc);
@@ -2471,7 +2477,7 @@
 }
 
 static inline int
-__ip_vs_get_service_entries(struct net *net,
+__ip_vs_get_service_entries(struct netns_ipvs *ipvs,
 			    const struct ip_vs_get_services *get,
 			    struct ip_vs_get_services __user *uptr)
 {
@@ -2483,7 +2489,7 @@
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
 			/* Only expose IPv4 entries to old interface */
-			if (svc->af != AF_INET || !net_eq(svc->net, net))
+			if (svc->af != AF_INET || (svc->ipvs != ipvs))
 				continue;
 
 			if (count >= get->num_services)
@@ -2502,7 +2508,7 @@
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
 			/* Only expose IPv4 entries to old interface */
-			if (svc->af != AF_INET || !net_eq(svc->net, net))
+			if (svc->af != AF_INET || (svc->ipvs != ipvs))
 				continue;
 
 			if (count >= get->num_services)
@@ -2522,7 +2528,7 @@
 }
 
 static inline int
-__ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
+__ip_vs_get_dest_entries(struct netns_ipvs *ipvs, const struct ip_vs_get_dests *get,
 			 struct ip_vs_get_dests __user *uptr)
 {
 	struct ip_vs_service *svc;
@@ -2531,9 +2537,9 @@
 
 	rcu_read_lock();
 	if (get->fwmark)
-		svc = __ip_vs_svc_fwm_find(net, AF_INET, get->fwmark);
+		svc = __ip_vs_svc_fwm_find(ipvs, AF_INET, get->fwmark);
 	else
-		svc = __ip_vs_service_find(net, AF_INET, get->protocol, &addr,
+		svc = __ip_vs_service_find(ipvs, AF_INET, get->protocol, &addr,
 					   get->port);
 	rcu_read_unlock();
 
@@ -2578,7 +2584,7 @@
 }
 
 static inline void
-__ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
+__ip_vs_get_timeouts(struct netns_ipvs *ipvs, struct ip_vs_timeout_user *u)
 {
 #if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
 	struct ip_vs_proto_data *pd;
@@ -2587,12 +2593,12 @@
 	memset(u, 0, sizeof (*u));
 
 #ifdef CONFIG_IP_VS_PROTO_TCP
-	pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+	pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
 	u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
 	u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
 #endif
 #ifdef CONFIG_IP_VS_PROTO_UDP
-	pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+	pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
 	u->udp_timeout =
 			pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
 #endif
@@ -2711,7 +2717,7 @@
 			ret = -EINVAL;
 			goto out;
 		}
-		ret = __ip_vs_get_service_entries(net, get, user);
+		ret = __ip_vs_get_service_entries(ipvs, get, user);
 	}
 	break;
 
@@ -2725,9 +2731,9 @@
 		addr.ip = entry->addr;
 		rcu_read_lock();
 		if (entry->fwmark)
-			svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark);
+			svc = __ip_vs_svc_fwm_find(ipvs, AF_INET, entry->fwmark);
 		else
-			svc = __ip_vs_service_find(net, AF_INET,
+			svc = __ip_vs_service_find(ipvs, AF_INET,
 						   entry->protocol, &addr,
 						   entry->port);
 		rcu_read_unlock();
@@ -2753,7 +2759,7 @@
 			ret = -EINVAL;
 			goto out;
 		}
-		ret = __ip_vs_get_dest_entries(net, get, user);
+		ret = __ip_vs_get_dest_entries(ipvs, get, user);
 	}
 	break;
 
@@ -2761,7 +2767,7 @@
 	{
 		struct ip_vs_timeout_user t;
 
-		__ip_vs_get_timeouts(net, &t);
+		__ip_vs_get_timeouts(ipvs, &t);
 		if (copy_to_user(user, &t, sizeof(t)) != 0)
 			ret = -EFAULT;
 	}
@@ -2996,12 +3002,13 @@
 	int idx = 0, i;
 	int start = cb->args[0];
 	struct ip_vs_service *svc;
-	struct net *net = skb_sknet(skb);
+	struct net *net = sock_net(skb->sk);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	mutex_lock(&__ip_vs_mutex);
 	for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
-			if (++idx <= start || !net_eq(svc->net, net))
+			if (++idx <= start || (svc->ipvs != ipvs))
 				continue;
 			if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
 				idx--;
@@ -3012,7 +3019,7 @@
 
 	for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
-			if (++idx <= start || !net_eq(svc->net, net))
+			if (++idx <= start || (svc->ipvs != ipvs))
 				continue;
 			if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
 				idx--;
@@ -3028,7 +3035,7 @@
 	return skb->len;
 }
 
-static int ip_vs_genl_parse_service(struct net *net,
+static int ip_vs_genl_parse_service(struct netns_ipvs *ipvs,
 				    struct ip_vs_service_user_kern *usvc,
 				    struct nlattr *nla, int full_entry,
 				    struct ip_vs_service **ret_svc)
@@ -3073,9 +3080,9 @@
 
 	rcu_read_lock();
 	if (usvc->fwmark)
-		svc = __ip_vs_svc_fwm_find(net, usvc->af, usvc->fwmark);
+		svc = __ip_vs_svc_fwm_find(ipvs, usvc->af, usvc->fwmark);
 	else
-		svc = __ip_vs_service_find(net, usvc->af, usvc->protocol,
+		svc = __ip_vs_service_find(ipvs, usvc->af, usvc->protocol,
 					   &usvc->addr, usvc->port);
 	rcu_read_unlock();
 	*ret_svc = svc;
@@ -3113,14 +3120,14 @@
 	return 0;
 }
 
-static struct ip_vs_service *ip_vs_genl_find_service(struct net *net,
+static struct ip_vs_service *ip_vs_genl_find_service(struct netns_ipvs *ipvs,
 						     struct nlattr *nla)
 {
 	struct ip_vs_service_user_kern usvc;
 	struct ip_vs_service *svc;
 	int ret;
 
-	ret = ip_vs_genl_parse_service(net, &usvc, nla, 0, &svc);
+	ret = ip_vs_genl_parse_service(ipvs, &usvc, nla, 0, &svc);
 	return ret ? ERR_PTR(ret) : svc;
 }
 
@@ -3195,7 +3202,8 @@
 	struct ip_vs_service *svc;
 	struct ip_vs_dest *dest;
 	struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
-	struct net *net = skb_sknet(skb);
+	struct net *net = sock_net(skb->sk);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	mutex_lock(&__ip_vs_mutex);
 
@@ -3205,7 +3213,7 @@
 		goto out_err;
 
 
-	svc = ip_vs_genl_find_service(net, attrs[IPVS_CMD_ATTR_SERVICE]);
+	svc = ip_vs_genl_find_service(ipvs, attrs[IPVS_CMD_ATTR_SERVICE]);
 	if (IS_ERR(svc) || svc == NULL)
 		goto out_err;
 
@@ -3341,7 +3349,7 @@
 static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
 				   struct netlink_callback *cb)
 {
-	struct net *net = skb_sknet(skb);
+	struct net *net = sock_net(skb->sk);
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	mutex_lock(&ipvs->sync_mutex);
@@ -3367,9 +3375,8 @@
 	return skb->len;
 }
 
-static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
+static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ipvs_sync_daemon_cfg c;
 	struct nlattr *a;
 	int ret;
@@ -3426,33 +3433,32 @@
 
 	rtnl_lock();
 	mutex_lock(&ipvs->sync_mutex);
-	ret = start_sync_thread(net, &c,
+	ret = start_sync_thread(ipvs, &c,
 				nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
 	mutex_unlock(&ipvs->sync_mutex);
 	rtnl_unlock();
 	return ret;
 }
 
-static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs)
+static int ip_vs_genl_del_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	int ret;
 
 	if (!attrs[IPVS_DAEMON_ATTR_STATE])
 		return -EINVAL;
 
 	mutex_lock(&ipvs->sync_mutex);
-	ret = stop_sync_thread(net,
+	ret = stop_sync_thread(ipvs,
 			       nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
 	mutex_unlock(&ipvs->sync_mutex);
 	return ret;
 }
 
-static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
+static int ip_vs_genl_set_config(struct netns_ipvs *ipvs, struct nlattr **attrs)
 {
 	struct ip_vs_timeout_user t;
 
-	__ip_vs_get_timeouts(net, &t);
+	__ip_vs_get_timeouts(ipvs, &t);
 
 	if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
 		t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
@@ -3464,17 +3470,15 @@
 	if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
 		t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
 
-	return ip_vs_set_timeout(net, &t);
+	return ip_vs_set_timeout(ipvs, &t);
 }
 
 static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
 {
 	int ret = -EINVAL, cmd;
-	struct net *net;
-	struct netns_ipvs *ipvs;
+	struct net *net = sock_net(skb->sk);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	net = skb_sknet(skb);
-	ipvs = net_ipvs(net);
 	cmd = info->genlhdr->cmd;
 
 	if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) {
@@ -3487,9 +3491,9 @@
 			goto out;
 
 		if (cmd == IPVS_CMD_NEW_DAEMON)
-			ret = ip_vs_genl_new_daemon(net, daemon_attrs);
+			ret = ip_vs_genl_new_daemon(ipvs, daemon_attrs);
 		else
-			ret = ip_vs_genl_del_daemon(net, daemon_attrs);
+			ret = ip_vs_genl_del_daemon(ipvs, daemon_attrs);
 	}
 
 out:
@@ -3503,22 +3507,22 @@
 	struct ip_vs_dest_user_kern udest;
 	int ret = 0, cmd;
 	int need_full_svc = 0, need_full_dest = 0;
-	struct net *net;
+	struct net *net = sock_net(skb->sk);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	net = skb_sknet(skb);
 	cmd = info->genlhdr->cmd;
 
 	mutex_lock(&__ip_vs_mutex);
 
 	if (cmd == IPVS_CMD_FLUSH) {
-		ret = ip_vs_flush(net, false);
+		ret = ip_vs_flush(ipvs, false);
 		goto out;
 	} else if (cmd == IPVS_CMD_SET_CONFIG) {
-		ret = ip_vs_genl_set_config(net, info->attrs);
+		ret = ip_vs_genl_set_config(ipvs, info->attrs);
 		goto out;
 	} else if (cmd == IPVS_CMD_ZERO &&
 		   !info->attrs[IPVS_CMD_ATTR_SERVICE]) {
-		ret = ip_vs_zero_all(net);
+		ret = ip_vs_zero_all(ipvs);
 		goto out;
 	}
 
@@ -3528,7 +3532,7 @@
 	if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
 		need_full_svc = 1;
 
-	ret = ip_vs_genl_parse_service(net, &usvc,
+	ret = ip_vs_genl_parse_service(ipvs, &usvc,
 				       info->attrs[IPVS_CMD_ATTR_SERVICE],
 				       need_full_svc, &svc);
 	if (ret)
@@ -3567,7 +3571,7 @@
 			/* The synchronization protocol is incompatible
 			 * with mixed family services
 			 */
-			if (net_ipvs(net)->sync_state) {
+			if (ipvs->sync_state) {
 				ret = -EINVAL;
 				goto out;
 			}
@@ -3587,7 +3591,7 @@
 	switch (cmd) {
 	case IPVS_CMD_NEW_SERVICE:
 		if (svc == NULL)
-			ret = ip_vs_add_service(net, &usvc, &svc);
+			ret = ip_vs_add_service(ipvs, &usvc, &svc);
 		else
 			ret = -EEXIST;
 		break;
@@ -3625,9 +3629,9 @@
 	struct sk_buff *msg;
 	void *reply;
 	int ret, cmd, reply_cmd;
-	struct net *net;
+	struct net *net = sock_net(skb->sk);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	net = skb_sknet(skb);
 	cmd = info->genlhdr->cmd;
 
 	if (cmd == IPVS_CMD_GET_SERVICE)
@@ -3656,7 +3660,7 @@
 	{
 		struct ip_vs_service *svc;
 
-		svc = ip_vs_genl_find_service(net,
+		svc = ip_vs_genl_find_service(ipvs,
 					      info->attrs[IPVS_CMD_ATTR_SERVICE]);
 		if (IS_ERR(svc)) {
 			ret = PTR_ERR(svc);
@@ -3677,7 +3681,7 @@
 	{
 		struct ip_vs_timeout_user t;
 
-		__ip_vs_get_timeouts(net, &t);
+		__ip_vs_get_timeouts(ipvs, &t);
 #ifdef CONFIG_IP_VS_PROTO_TCP
 		if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP,
 				t.tcp_timeout) ||
@@ -3832,10 +3836,10 @@
  * per netns intit/exit func.
  */
 #ifdef CONFIG_SYSCTL
-static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
+static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
 {
+	struct net *net = ipvs->net;
 	int idx;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ctl_table *tbl;
 
 	atomic_set(&ipvs->dropentry, 0);
@@ -3854,6 +3858,10 @@
 	} else
 		tbl = vs_vars;
 	/* Initialize sysctl defaults */
+	for (idx = 0; idx < ARRAY_SIZE(vs_vars); idx++) {
+		if (tbl[idx].proc_handler == proc_do_defense_mode)
+			tbl[idx].extra2 = ipvs;
+	}
 	idx = 0;
 	ipvs->sysctl_amemthresh = 1024;
 	tbl[idx++].data = &ipvs->sysctl_amemthresh;
@@ -3895,7 +3903,8 @@
 	tbl[idx++].data = &ipvs->sysctl_backup_only;
 	ipvs->sysctl_conn_reuse_mode = 1;
 	tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
-
+	tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
+	tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
 
 	ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
 	if (ipvs->sysctl_hdr == NULL) {
@@ -3903,7 +3912,7 @@
 			kfree(tbl);
 		return -ENOMEM;
 	}
-	ip_vs_start_estimator(net, &ipvs->tot_stats);
+	ip_vs_start_estimator(ipvs, &ipvs->tot_stats);
 	ipvs->sysctl_tbl = tbl;
 	/* Schedule defense work */
 	INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
@@ -3912,14 +3921,14 @@
 	return 0;
 }
 
-static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
+static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct net *net = ipvs->net;
 
 	cancel_delayed_work_sync(&ipvs->defense_work);
 	cancel_work_sync(&ipvs->defense_work.work);
 	unregister_net_sysctl_table(ipvs->sysctl_hdr);
-	ip_vs_stop_estimator(net, &ipvs->tot_stats);
+	ip_vs_stop_estimator(ipvs, &ipvs->tot_stats);
 
 	if (!net_eq(net, &init_net))
 		kfree(ipvs->sysctl_tbl);
@@ -3927,8 +3936,8 @@
 
 #else
 
-static int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
-static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
+static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) { return 0; }
+static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs) { }
 
 #endif
 
@@ -3936,10 +3945,10 @@
 	.notifier_call = ip_vs_dst_event,
 };
 
-int __net_init ip_vs_control_net_init(struct net *net)
+int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
 {
+	struct net *net = ipvs->net;
 	int i, idx;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	/* Initialize rs_table */
 	for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
@@ -3948,7 +3957,7 @@
 	INIT_LIST_HEAD(&ipvs->dest_trash);
 	spin_lock_init(&ipvs->dest_trash_lock);
 	setup_timer(&ipvs->dest_trash_timer, ip_vs_dest_trash_expire,
-		    (unsigned long) net);
+		    (unsigned long) ipvs);
 	atomic_set(&ipvs->ftpsvc_counter, 0);
 	atomic_set(&ipvs->nullsvc_counter, 0);
 
@@ -3970,7 +3979,7 @@
 	proc_create("ip_vs_stats_percpu", 0, net->proc_net,
 		    &ip_vs_stats_percpu_fops);
 
-	if (ip_vs_control_net_init_sysctl(net))
+	if (ip_vs_control_net_init_sysctl(ipvs))
 		goto err;
 
 	return 0;
@@ -3980,12 +3989,12 @@
 	return -ENOMEM;
 }
 
-void __net_exit ip_vs_control_net_cleanup(struct net *net)
+void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct net *net = ipvs->net;
 
-	ip_vs_trash_cleanup(net);
-	ip_vs_control_net_cleanup_sysctl(net);
+	ip_vs_trash_cleanup(ipvs);
+	ip_vs_control_net_cleanup_sysctl(ipvs);
 	remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
 	remove_proc_entry("ip_vs_stats", net->proc_net);
 	remove_proc_entry("ip_vs", net->proc_net);
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index ef0eb0a..457c6c1 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -102,10 +102,8 @@
 	struct ip_vs_estimator *e;
 	struct ip_vs_stats *s;
 	u64 rate;
-	struct net *net = (struct net *)arg;
-	struct netns_ipvs *ipvs;
+	struct netns_ipvs *ipvs = (struct netns_ipvs *)arg;
 
-	ipvs = net_ipvs(net);
 	spin_lock(&ipvs->est_lock);
 	list_for_each_entry(e, &ipvs->est_list, list) {
 		s = container_of(e, struct ip_vs_stats, est);
@@ -140,9 +138,8 @@
 	mod_timer(&ipvs->est_timer, jiffies + 2*HZ);
 }
 
-void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats)
+void ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_estimator *est = &stats->est;
 
 	INIT_LIST_HEAD(&est->list);
@@ -152,9 +149,8 @@
 	spin_unlock_bh(&ipvs->est_lock);
 }
 
-void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats)
+void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_estimator *est = &stats->est;
 
 	spin_lock_bh(&ipvs->est_lock);
@@ -192,18 +188,16 @@
 	dst->outbps = (e->outbps + 0xF) >> 5;
 }
 
-int __net_init ip_vs_estimator_net_init(struct net *net)
+int __net_init ip_vs_estimator_net_init(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
 	INIT_LIST_HEAD(&ipvs->est_list);
 	spin_lock_init(&ipvs->est_lock);
-	setup_timer(&ipvs->est_timer, estimation_timer, (unsigned long)net);
+	setup_timer(&ipvs->est_timer, estimation_timer, (unsigned long)ipvs);
 	mod_timer(&ipvs->est_timer, jiffies + 2 * HZ);
 	return 0;
 }
 
-void __net_exit ip_vs_estimator_net_cleanup(struct net *net)
+void __net_exit ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs)
 {
-	del_timer_sync(&net_ipvs(net)->est_timer);
+	del_timer_sync(&ipvs->est_timer);
 }
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 5d3daae..d30c327 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -181,7 +181,6 @@
 	int ret = 0;
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct;
-	struct net *net;
 
 	*diff = 0;
 
@@ -223,14 +222,14 @@
 		 */
 		{
 			struct ip_vs_conn_param p;
-			ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+			ip_vs_conn_fill_param(cp->ipvs, AF_INET,
 					      iph->protocol, &from, port,
 					      &cp->caddr, 0, &p);
 			n_cp = ip_vs_conn_out_get(&p);
 		}
 		if (!n_cp) {
 			struct ip_vs_conn_param p;
-			ip_vs_conn_fill_param(ip_vs_conn_net(cp),
+			ip_vs_conn_fill_param(cp->ipvs,
 					      AF_INET, IPPROTO_TCP, &cp->caddr,
 					      0, &cp->vaddr, port, &p);
 			/* As above, this is ipv4 only */
@@ -289,9 +288,8 @@
 		 * would be adjusted twice.
 		 */
 
-		net = skb_net(skb);
 		cp->app_data = NULL;
-		ip_vs_tcp_conn_listen(net, n_cp);
+		ip_vs_tcp_conn_listen(n_cp);
 		ip_vs_conn_put(n_cp);
 		return ret;
 	}
@@ -320,7 +318,6 @@
 	union nf_inet_addr to;
 	__be16 port;
 	struct ip_vs_conn *n_cp;
-	struct net *net;
 
 	/* no diff required for incoming packets */
 	*diff = 0;
@@ -392,7 +389,7 @@
 
 	{
 		struct ip_vs_conn_param p;
-		ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+		ip_vs_conn_fill_param(cp->ipvs, AF_INET,
 				      iph->protocol, &to, port, &cp->vaddr,
 				      htons(ntohs(cp->vport)-1), &p);
 		n_cp = ip_vs_conn_in_get(&p);
@@ -413,8 +410,7 @@
 	/*
 	 *	Move tunnel to listen state
 	 */
-	net = skb_net(skb);
-	ip_vs_tcp_conn_listen(net, n_cp);
+	ip_vs_tcp_conn_listen(n_cp);
 	ip_vs_conn_put(n_cp);
 
 	return 1;
@@ -447,14 +443,14 @@
 	if (!ipvs)
 		return -ENOENT;
 
-	app = register_ip_vs_app(net, &ip_vs_ftp);
+	app = register_ip_vs_app(ipvs, &ip_vs_ftp);
 	if (IS_ERR(app))
 		return PTR_ERR(app);
 
 	for (i = 0; i < ports_count; i++) {
 		if (!ports[i])
 			continue;
-		ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
+		ret = register_ip_vs_app_inc(ipvs, app, app->protocol, ports[i]);
 		if (ret)
 			goto err_unreg;
 		pr_info("%s: loaded support on port[%d] = %d\n",
@@ -463,7 +459,7 @@
 	return 0;
 
 err_unreg:
-	unregister_ip_vs_app(net, &ip_vs_ftp);
+	unregister_ip_vs_app(ipvs, &ip_vs_ftp);
 	return ret;
 }
 /*
@@ -471,7 +467,12 @@
  */
 static void __ip_vs_ftp_exit(struct net *net)
 {
-	unregister_ip_vs_app(net, &ip_vs_ftp);
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	if (!ipvs)
+		return;
+
+	unregister_ip_vs_app(ipvs, &ip_vs_ftp);
 }
 
 static struct pernet_operations ip_vs_ftp_ops = {
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 127f140..cccf4d6 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -250,8 +250,7 @@
 static int sysctl_lblc_expiration(struct ip_vs_service *svc)
 {
 #ifdef CONFIG_SYSCTL
-	struct netns_ipvs *ipvs = net_ipvs(svc->net);
-	return ipvs->sysctl_lblc_expiration;
+	return svc->ipvs->sysctl_lblc_expiration;
 #else
 	return DEFAULT_EXPIRATION;
 #endif
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 2229d2d..796d70e 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -415,8 +415,7 @@
 static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
 {
 #ifdef CONFIG_SYSCTL
-	struct netns_ipvs *ipvs = net_ipvs(svc->net);
-	return ipvs->sysctl_lblcr_expiration;
+	return svc->ipvs->sysctl_lblcr_expiration;
 #else
 	return DEFAULT_EXPIRATION;
 #endif
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 1361845..30434fb 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -161,7 +161,7 @@
 
 	/* RS->CLIENT */
 	orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
-	ip_vs_conn_fill_param(net, exp->tuple.src.l3num, orig->dst.protonum,
+	ip_vs_conn_fill_param(net_ipvs(net), exp->tuple.src.l3num, orig->dst.protonum,
 			      &orig->src.u3, orig->src.u.tcp.port,
 			      &orig->dst.u3, orig->dst.u.tcp.port, &p);
 	cp = ip_vs_conn_out_get(&p);
@@ -274,8 +274,7 @@
 		" for conn " FMT_CONN "\n",
 		__func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
 
-	h = nf_conntrack_find_get(ip_vs_conn_net(cp), &nf_ct_zone_dflt,
-				  &tuple);
+	h = nf_conntrack_find_get(cp->ipvs->net, &nf_ct_zone_dflt, &tuple);
 	if (h) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
 		/* Show what happens instead of calling nf_ct_kill() */
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index bed5f70..1b8d594 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -70,7 +70,7 @@
 	const char *dptr;
 	int retc;
 
-	ip_vs_fill_iph_skb(p->af, skb, &iph);
+	ip_vs_fill_iph_skb(p->af, skb, false, &iph);
 
 	/* Only useful with UDP */
 	if (iph.protocol != IPPROTO_UDP)
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 939f7fb..8ae4807 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -63,9 +63,8 @@
  *	register an ipvs protocols netns related data
  */
 static int
-register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
+register_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_protocol *pp)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
 	struct ip_vs_proto_data *pd =
 			kzalloc(sizeof(struct ip_vs_proto_data), GFP_KERNEL);
@@ -79,7 +78,7 @@
 	atomic_set(&pd->appcnt, 0);	/* Init app counter */
 
 	if (pp->init_netns != NULL) {
-		int ret = pp->init_netns(net, pd);
+		int ret = pp->init_netns(ipvs, pd);
 		if (ret) {
 			/* unlink and free proto data */
 			ipvs->proto_data_table[hash] = pd->next;
@@ -116,9 +115,8 @@
  *	unregister an ipvs protocols netns data
  */
 static int
-unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
+unregister_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_proto_data **pd_p;
 	unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol);
 
@@ -127,7 +125,7 @@
 		if (*pd_p == pd) {
 			*pd_p = pd->next;
 			if (pd->pp->exit_netns != NULL)
-				pd->pp->exit_netns(net, pd);
+				pd->pp->exit_netns(ipvs, pd);
 			kfree(pd);
 			return 0;
 		}
@@ -156,8 +154,8 @@
 /*
  *	get ip_vs_protocol object data by netns and proto
  */
-static struct ip_vs_proto_data *
-__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
+struct ip_vs_proto_data *
+ip_vs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
 {
 	struct ip_vs_proto_data *pd;
 	unsigned int hash = IP_VS_PROTO_HASH(proto);
@@ -169,14 +167,6 @@
 
 	return NULL;
 }
-
-struct ip_vs_proto_data *
-ip_vs_proto_data_get(struct net *net, unsigned short proto)
-{
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
-	return __ipvs_proto_data_get(ipvs, proto);
-}
 EXPORT_SYMBOL(ip_vs_proto_data_get);
 
 /*
@@ -317,7 +307,7 @@
 /*
  * per network name-space init
  */
-int __net_init ip_vs_protocol_net_init(struct net *net)
+int __net_init ip_vs_protocol_net_init(struct netns_ipvs *ipvs)
 {
 	int i, ret;
 	static struct ip_vs_protocol *protos[] = {
@@ -339,27 +329,26 @@
 	};
 
 	for (i = 0; i < ARRAY_SIZE(protos); i++) {
-		ret = register_ip_vs_proto_netns(net, protos[i]);
+		ret = register_ip_vs_proto_netns(ipvs, protos[i]);
 		if (ret < 0)
 			goto cleanup;
 	}
 	return 0;
 
 cleanup:
-	ip_vs_protocol_net_cleanup(net);
+	ip_vs_protocol_net_cleanup(ipvs);
 	return ret;
 }
 
-void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
+void __net_exit ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_proto_data *pd;
 	int i;
 
 	/* unregister all the ipvs proto data for this netns */
 	for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
 		while ((pd = ipvs->proto_data_table[i]) != NULL)
-			unregister_ip_vs_proto_netns(net, pd);
+			unregister_ip_vs_proto_netns(ipvs, pd);
 	}
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index 5de3dd3..5320d39 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -41,30 +41,28 @@
 #define PORT_ISAKMP	500
 
 static void
-ah_esp_conn_fill_param_proto(struct net *net, int af,
-			     const struct ip_vs_iphdr *iph, int inverse,
+ah_esp_conn_fill_param_proto(struct netns_ipvs *ipvs, int af,
+			     const struct ip_vs_iphdr *iph,
 			     struct ip_vs_conn_param *p)
 {
-	if (likely(!inverse))
-		ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
+	if (likely(!ip_vs_iph_inverse(iph)))
+		ip_vs_conn_fill_param(ipvs, af, IPPROTO_UDP,
 				      &iph->saddr, htons(PORT_ISAKMP),
 				      &iph->daddr, htons(PORT_ISAKMP), p);
 	else
-		ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
+		ip_vs_conn_fill_param(ipvs, af, IPPROTO_UDP,
 				      &iph->daddr, htons(PORT_ISAKMP),
 				      &iph->saddr, htons(PORT_ISAKMP), p);
 }
 
 static struct ip_vs_conn *
-ah_esp_conn_in_get(int af, const struct sk_buff *skb,
-		   const struct ip_vs_iphdr *iph,
-		   int inverse)
+ah_esp_conn_in_get(struct netns_ipvs *ipvs, int af, const struct sk_buff *skb,
+		   const struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_conn *cp;
 	struct ip_vs_conn_param p;
-	struct net *net = skb_net(skb);
 
-	ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
+	ah_esp_conn_fill_param_proto(ipvs, af, iph, &p);
 	cp = ip_vs_conn_in_get(&p);
 	if (!cp) {
 		/*
@@ -73,7 +71,7 @@
 		 */
 		IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet "
 			      "%s%s %s->%s\n",
-			      inverse ? "ICMP+" : "",
+			      ip_vs_iph_icmp(iph) ? "ICMP+" : "",
 			      ip_vs_proto_get(iph->protocol)->name,
 			      IP_VS_DBG_ADDR(af, &iph->saddr),
 			      IP_VS_DBG_ADDR(af, &iph->daddr));
@@ -84,19 +82,18 @@
 
 
 static struct ip_vs_conn *
-ah_esp_conn_out_get(int af, const struct sk_buff *skb,
-		    const struct ip_vs_iphdr *iph, int inverse)
+ah_esp_conn_out_get(struct netns_ipvs *ipvs, int af, const struct sk_buff *skb,
+		    const struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_conn *cp;
 	struct ip_vs_conn_param p;
-	struct net *net = skb_net(skb);
 
-	ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
+	ah_esp_conn_fill_param_proto(ipvs, af, iph, &p);
 	cp = ip_vs_conn_out_get(&p);
 	if (!cp) {
 		IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet "
 			      "%s%s %s->%s\n",
-			      inverse ? "ICMP+" : "",
+			      ip_vs_iph_icmp(iph) ? "ICMP+" : "",
 			      ip_vs_proto_get(iph->protocol)->name,
 			      IP_VS_DBG_ADDR(af, &iph->saddr),
 			      IP_VS_DBG_ADDR(af, &iph->daddr));
@@ -107,7 +104,8 @@
 
 
 static int
-ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
+ah_esp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
+		     struct ip_vs_proto_data *pd,
 		     int *verdict, struct ip_vs_conn **cpp,
 		     struct ip_vs_iphdr *iph)
 {
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 5b84c0b..010ddee 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -9,35 +9,44 @@
 #include <net/ip_vs.h>
 
 static int
-sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
+sctp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
+		   struct ip_vs_proto_data *pd,
 		   int *verdict, struct ip_vs_conn **cpp,
 		   struct ip_vs_iphdr *iph)
 {
-	struct net *net;
 	struct ip_vs_service *svc;
-	struct netns_ipvs *ipvs;
 	sctp_chunkhdr_t _schunkh, *sch;
 	sctp_sctphdr_t *sh, _sctph;
+	__be16 _ports[2], *ports = NULL;
 
-	sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
-	if (sh == NULL) {
+	if (likely(!ip_vs_iph_icmp(iph))) {
+		sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
+		if (sh) {
+			sch = skb_header_pointer(
+				skb, iph->len + sizeof(sctp_sctphdr_t),
+				sizeof(_schunkh), &_schunkh);
+			if (sch && (sch->type == SCTP_CID_INIT ||
+				    sysctl_sloppy_sctp(ipvs)))
+				ports = &sh->source;
+		}
+	} else {
+		ports = skb_header_pointer(
+			skb, iph->len, sizeof(_ports), &_ports);
+	}
+
+	if (!ports) {
 		*verdict = NF_DROP;
 		return 0;
 	}
 
-	sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
-				 sizeof(_schunkh), &_schunkh);
-	if (sch == NULL) {
-		*verdict = NF_DROP;
-		return 0;
-	}
-
-	net = skb_net(skb);
-	ipvs = net_ipvs(net);
 	rcu_read_lock();
-	if ((sch->type == SCTP_CID_INIT || sysctl_sloppy_sctp(ipvs)) &&
-	    (svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
-				      &iph->daddr, sh->dest))) {
+	if (likely(!ip_vs_iph_inverse(iph)))
+		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
+					 &iph->daddr, ports[1]);
+	else
+		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
+					 &iph->saddr, ports[0]);
+	if (svc) {
 		int ignored;
 
 		if (ip_vs_todrop(ipvs)) {
@@ -474,14 +483,13 @@
 		& SCTP_APP_TAB_MASK;
 }
 
-static int sctp_register_app(struct net *net, struct ip_vs_app *inc)
+static int sctp_register_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
 	struct ip_vs_app *i;
 	__u16 hash;
 	__be16 port = inc->port;
 	int ret = 0;
-	struct netns_ipvs *ipvs = net_ipvs(net);
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_SCTP);
 
 	hash = sctp_app_hashkey(port);
 
@@ -498,9 +506,9 @@
 	return ret;
 }
 
-static void sctp_unregister_app(struct net *net, struct ip_vs_app *inc)
+static void sctp_unregister_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_SCTP);
 
 	atomic_dec(&pd->appcnt);
 	list_del_rcu(&inc->p_list);
@@ -508,7 +516,7 @@
 
 static int sctp_app_conn_bind(struct ip_vs_conn *cp)
 {
-	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
+	struct netns_ipvs *ipvs = cp->ipvs;
 	int hash;
 	struct ip_vs_app *inc;
 	int result = 0;
@@ -549,10 +557,8 @@
  *   timeouts are netns related now.
  * ---------------------------------------------
  */
-static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_sctp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
 	ip_vs_init_hash_table(ipvs->sctp_apps, SCTP_APP_TAB_SIZE);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
 							sizeof(sctp_timeouts));
@@ -561,7 +567,7 @@
 	return 0;
 }
 
-static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
+static void __ip_vs_sctp_exit(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
 	kfree(pd->timeout_table);
 }
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 8e92beb..d7024b2 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -32,27 +32,47 @@
 #include <net/ip_vs.h>
 
 static int
-tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
+tcp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
+		  struct ip_vs_proto_data *pd,
 		  int *verdict, struct ip_vs_conn **cpp,
 		  struct ip_vs_iphdr *iph)
 {
-	struct net *net;
 	struct ip_vs_service *svc;
 	struct tcphdr _tcph, *th;
-	struct netns_ipvs *ipvs;
+	__be16 _ports[2], *ports = NULL;
 
-	th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
-	if (th == NULL) {
+	/* In the event of icmp, we're only guaranteed to have the first 8
+	 * bytes of the transport header, so we only check the rest of the
+	 * TCP packet for non-ICMP packets
+	 */
+	if (likely(!ip_vs_iph_icmp(iph))) {
+		th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
+		if (th) {
+			if (th->rst || !(sysctl_sloppy_tcp(ipvs) || th->syn))
+				return 1;
+			ports = &th->source;
+		}
+	} else {
+		ports = skb_header_pointer(
+			skb, iph->len, sizeof(_ports), &_ports);
+	}
+
+	if (!ports) {
 		*verdict = NF_DROP;
 		return 0;
 	}
-	net = skb_net(skb);
-	ipvs = net_ipvs(net);
+
 	/* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
 	rcu_read_lock();
-	if ((th->syn || sysctl_sloppy_tcp(ipvs)) && !th->rst &&
-	    (svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
-				      &iph->daddr, th->dest))) {
+
+	if (likely(!ip_vs_iph_inverse(iph)))
+		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
+					 &iph->daddr, ports[1]);
+	else
+		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
+					 &iph->saddr, ports[0]);
+
+	if (svc) {
 		int ignored;
 
 		if (ip_vs_todrop(ipvs)) {
@@ -571,14 +591,13 @@
 }
 
 
-static int tcp_register_app(struct net *net, struct ip_vs_app *inc)
+static int tcp_register_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
 	struct ip_vs_app *i;
 	__u16 hash;
 	__be16 port = inc->port;
 	int ret = 0;
-	struct netns_ipvs *ipvs = net_ipvs(net);
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
 
 	hash = tcp_app_hashkey(port);
 
@@ -597,9 +616,9 @@
 
 
 static void
-tcp_unregister_app(struct net *net, struct ip_vs_app *inc)
+tcp_unregister_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
 
 	atomic_dec(&pd->appcnt);
 	list_del_rcu(&inc->p_list);
@@ -609,7 +628,7 @@
 static int
 tcp_app_conn_bind(struct ip_vs_conn *cp)
 {
-	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
+	struct netns_ipvs *ipvs = cp->ipvs;
 	int hash;
 	struct ip_vs_app *inc;
 	int result = 0;
@@ -653,9 +672,9 @@
 /*
  *	Set LISTEN timeout. (ip_vs_conn_put will setup timer)
  */
-void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
+void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
 {
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(cp->ipvs, IPPROTO_TCP);
 
 	spin_lock_bh(&cp->lock);
 	cp->state = IP_VS_TCP_S_LISTEN;
@@ -668,10 +687,8 @@
  *   timeouts are netns related now.
  * ---------------------------------------------
  */
-static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_tcp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
 	ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
 							sizeof(tcp_timeouts));
@@ -681,7 +698,7 @@
 	return 0;
 }
 
-static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
+static void __ip_vs_tcp_exit(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
 	kfree(pd->timeout_table);
 }
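
The sctp_conn_schedule and tcp_conn_schedule changes above (and the udp one below) share the same idea: when the packet being scheduled is an ICMP error, only the first 8 bytes of the embedded transport header are guaranteed to be present, and the source/destination ports sit in the first 4 bytes for TCP, UDP and SCTP alike, so the code pulls just a __be16[2] pair and then picks ports[1]/daddr or ports[0]/saddr depending on direction. A rough userspace sketch of that port extraction, with a plain buffer standing in for skb_header_pointer() and a made-up helper name:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <arpa/inet.h>	/* ntohs() */

	/* The first 4 bytes of TCP, UDP and SCTP headers are the source and
	 * destination ports, so the same read works for all three protocols. */
	static int extract_ports(const uint8_t *hdr, size_t len,
				 uint16_t *sport, uint16_t *dport)
	{
		uint16_t ports[2];

		if (len < sizeof(ports))	/* ICMP errors guarantee only 8 bytes */
			return -1;
		memcpy(ports, hdr, sizeof(ports));
		*sport = ntohs(ports[0]);
		*dport = ntohs(ports[1]);
		return 0;
	}

	int main(void)
	{
		/* First 8 bytes of a TCP header: sport 12345, dport 80, start of seq. */
		uint8_t hdr[8] = { 0x30, 0x39, 0x00, 0x50, 0x00, 0x00, 0x00, 0x01 };
		uint16_t sport, dport;

		if (extract_ports(hdr, sizeof(hdr), &sport, &dport) == 0)
			printf("sport=%u dport=%u\n", sport, dport);
		return 0;
	}
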
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index b62a3c0..e494e9a 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -29,28 +29,42 @@
 #include <net/ip6_checksum.h>
 
 static int
-udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
+udp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
+		  struct ip_vs_proto_data *pd,
 		  int *verdict, struct ip_vs_conn **cpp,
 		  struct ip_vs_iphdr *iph)
 {
-	struct net *net;
 	struct ip_vs_service *svc;
 	struct udphdr _udph, *uh;
+	__be16 _ports[2], *ports = NULL;
 
-	/* IPv6 fragments, only first fragment will hit this */
-	uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
-	if (uh == NULL) {
+	if (likely(!ip_vs_iph_icmp(iph))) {
+		/* IPv6 fragments, only first fragment will hit this */
+		uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
+		if (uh)
+			ports = &uh->source;
+	} else {
+		ports = skb_header_pointer(
+			skb, iph->len, sizeof(_ports), &_ports);
+	}
+
+	if (!ports) {
 		*verdict = NF_DROP;
 		return 0;
 	}
-	net = skb_net(skb);
+
 	rcu_read_lock();
-	svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
-				 &iph->daddr, uh->dest);
+	if (likely(!ip_vs_iph_inverse(iph)))
+		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
+					 &iph->daddr, ports[1]);
+	else
+		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
+					 &iph->saddr, ports[0]);
+
 	if (svc) {
 		int ignored;
 
-		if (ip_vs_todrop(net_ipvs(net))) {
+		if (ip_vs_todrop(ipvs)) {
 			/*
 			 * It seems that we are very loaded.
 			 * We have to drop this packet :(
@@ -348,14 +362,13 @@
 }
 
 
-static int udp_register_app(struct net *net, struct ip_vs_app *inc)
+static int udp_register_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
 	struct ip_vs_app *i;
 	__u16 hash;
 	__be16 port = inc->port;
 	int ret = 0;
-	struct netns_ipvs *ipvs = net_ipvs(net);
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
 
 	hash = udp_app_hashkey(port);
 
@@ -374,9 +387,9 @@
 
 
 static void
-udp_unregister_app(struct net *net, struct ip_vs_app *inc)
+udp_unregister_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
 
 	atomic_dec(&pd->appcnt);
 	list_del_rcu(&inc->p_list);
@@ -385,7 +398,7 @@
 
 static int udp_app_conn_bind(struct ip_vs_conn *cp)
 {
-	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
+	struct netns_ipvs *ipvs = cp->ipvs;
 	int hash;
 	struct ip_vs_app *inc;
 	int result = 0;
@@ -456,10 +469,8 @@
 	cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
 }
 
-static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __udp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
 	ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
 							sizeof(udp_timeouts));
@@ -468,7 +479,7 @@
 	return 0;
 }
 
-static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
+static void __udp_exit(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
 	kfree(pd->timeout_table);
 }
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 98a1343..1e373a5 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -280,35 +280,29 @@
 static inline __be16
 ip_vs_sh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph)
 {
-	__be16 port;
-	struct tcphdr _tcph, *th;
-	struct udphdr _udph, *uh;
-	sctp_sctphdr_t _sctph, *sh;
+	__be16 _ports[2], *ports;
 
+	/* At this point we know that we have a valid packet of some kind.
+	 * Because ICMP packets are only guaranteed to have the first 8
+	 * bytes, let's just grab the ports.  Fortunately they're in the
+	 * same position for all three of the protocols we care about.
+	 */
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
-		th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
-		if (unlikely(th == NULL))
-			return 0;
-		port = th->source;
-		break;
 	case IPPROTO_UDP:
-		uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
-		if (unlikely(uh == NULL))
-			return 0;
-		port = uh->source;
-		break;
 	case IPPROTO_SCTP:
-		sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
-		if (unlikely(sh == NULL))
+		ports = skb_header_pointer(skb, iph->len, sizeof(_ports),
+					   &_ports);
+		if (unlikely(!ports))
 			return 0;
-		port = sh->source;
-		break;
-	default:
-		port = 0;
-	}
 
-	return port;
+		if (likely(!ip_vs_iph_inverse(iph)))
+			return ports[0];
+		else
+			return ports[1];
+	default:
+		return 0;
+	}
 }
 
 
@@ -322,6 +316,9 @@
 	struct ip_vs_dest *dest;
 	struct ip_vs_sh_state *s;
 	__be16 port = 0;
+	const union nf_inet_addr *hash_addr;
+
+	hash_addr = ip_vs_iph_inverse(iph) ? &iph->daddr : &iph->saddr;
 
 	IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n");
 
@@ -331,9 +328,9 @@
 	s = (struct ip_vs_sh_state *) svc->sched_data;
 
 	if (svc->flags & IP_VS_SVC_F_SCHED_SH_FALLBACK)
-		dest = ip_vs_sh_get_fallback(svc, s, &iph->saddr, port);
+		dest = ip_vs_sh_get_fallback(svc, s, hash_addr, port);
 	else
-		dest = ip_vs_sh_get(svc, s, &iph->saddr, port);
+		dest = ip_vs_sh_get(svc, s, hash_addr, port);
 
 	if (!dest) {
 		ip_vs_scheduler_err(svc, "no destination available");
@@ -341,7 +338,7 @@
 	}
 
 	IP_VS_DBG_BUF(6, "SH: source IP address %s --> server %s:%d\n",
-		      IP_VS_DBG_ADDR(svc->af, &iph->saddr),
+		      IP_VS_DBG_ADDR(svc->af, hash_addr),
 		      IP_VS_DBG_ADDR(dest->af, &dest->addr),
 		      ntohs(dest->port));
 
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 43f1409..803001a 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -193,7 +193,7 @@
 #define IPVS_OPT_F_PARAM	(1 << (IPVS_OPT_PARAM-1))
 
 struct ip_vs_sync_thread_data {
-	struct net *net;
+	struct netns_ipvs *ipvs;
 	struct socket *sock;
 	char *buf;
 	int id;
@@ -533,10 +533,9 @@
  *      Version 0 , could be switched in by sys_ctl.
  *      Add an ip_vs_conn information into the current sync_buff.
  */
-static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
+static void ip_vs_sync_conn_v0(struct netns_ipvs *ipvs, struct ip_vs_conn *cp,
 			       int pkts)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_sync_mesg_v0 *m;
 	struct ip_vs_sync_conn_v0 *s;
 	struct ip_vs_sync_buff *buff;
@@ -615,7 +614,7 @@
 			pkts = atomic_add_return(1, &cp->in_pkts);
 		else
 			pkts = sysctl_sync_threshold(ipvs);
-		ip_vs_sync_conn(net, cp, pkts);
+		ip_vs_sync_conn(ipvs, cp, pkts);
 	}
 }
 
@@ -624,9 +623,8 @@
  *      Called by ip_vs_in.
  *      Sending Version 1 messages
  */
-void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts)
+void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_sync_mesg *m;
 	union ip_vs_sync_conn *s;
 	struct ip_vs_sync_buff *buff;
@@ -637,7 +635,7 @@
 
 	/* Handle old version of the protocol */
 	if (sysctl_sync_ver(ipvs) == 0) {
-		ip_vs_sync_conn_v0(net, cp, pkts);
+		ip_vs_sync_conn_v0(ipvs, cp, pkts);
 		return;
 	}
 	/* Do not sync ONE PACKET */
@@ -784,21 +782,21 @@
  *  fill_param used by version 1
  */
 static inline int
-ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc,
+ip_vs_conn_fill_param_sync(struct netns_ipvs *ipvs, int af, union ip_vs_sync_conn *sc,
 			   struct ip_vs_conn_param *p,
 			   __u8 *pe_data, unsigned int pe_data_len,
 			   __u8 *pe_name, unsigned int pe_name_len)
 {
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6)
-		ip_vs_conn_fill_param(net, af, sc->v6.protocol,
+		ip_vs_conn_fill_param(ipvs, af, sc->v6.protocol,
 				      (const union nf_inet_addr *)&sc->v6.caddr,
 				      sc->v6.cport,
 				      (const union nf_inet_addr *)&sc->v6.vaddr,
 				      sc->v6.vport, p);
 	else
 #endif
-		ip_vs_conn_fill_param(net, af, sc->v4.protocol,
+		ip_vs_conn_fill_param(ipvs, af, sc->v4.protocol,
 				      (const union nf_inet_addr *)&sc->v4.caddr,
 				      sc->v4.cport,
 				      (const union nf_inet_addr *)&sc->v4.vaddr,
@@ -837,7 +835,7 @@
  *  Param: ...
  *         timeout is in sec.
  */
-static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
+static void ip_vs_proc_conn(struct netns_ipvs *ipvs, struct ip_vs_conn_param *param,
 			    unsigned int flags, unsigned int state,
 			    unsigned int protocol, unsigned int type,
 			    const union nf_inet_addr *daddr, __be16 dport,
@@ -846,7 +844,6 @@
 {
 	struct ip_vs_dest *dest;
 	struct ip_vs_conn *cp;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
 		cp = ip_vs_conn_in_get(param);
@@ -904,7 +901,7 @@
 		 * with synchronization, so we can make the assumption that
 		 * the svc_af is the same as the dest_af
 		 */
-		dest = ip_vs_find_dest(net, type, type, daddr, dport,
+		dest = ip_vs_find_dest(ipvs, type, type, daddr, dport,
 				       param->vaddr, param->vport, protocol,
 				       fwmark, flags);
 
@@ -941,7 +938,7 @@
 	} else {
 		struct ip_vs_proto_data *pd;
 
-		pd = ip_vs_proto_data_get(net, protocol);
+		pd = ip_vs_proto_data_get(ipvs, protocol);
 		if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table)
 			cp->timeout = pd->timeout_table[state];
 		else
@@ -953,7 +950,7 @@
 /*
  *  Process received multicast message for Version 0
  */
-static void ip_vs_process_message_v0(struct net *net, const char *buffer,
+static void ip_vs_process_message_v0(struct netns_ipvs *ipvs, const char *buffer,
 				     const size_t buflen)
 {
 	struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer;
@@ -1009,14 +1006,14 @@
 			}
 		}
 
-		ip_vs_conn_fill_param(net, AF_INET, s->protocol,
+		ip_vs_conn_fill_param(ipvs, AF_INET, s->protocol,
 				      (const union nf_inet_addr *)&s->caddr,
 				      s->cport,
 				      (const union nf_inet_addr *)&s->vaddr,
 				      s->vport, &param);
 
 		/* Send timeout as Zero */
-		ip_vs_proc_conn(net, &param, flags, state, s->protocol, AF_INET,
+		ip_vs_proc_conn(ipvs, &param, flags, state, s->protocol, AF_INET,
 				(union nf_inet_addr *)&s->daddr, s->dport,
 				0, 0, opt);
 	}
@@ -1067,7 +1064,7 @@
 /*
  *   Process a Version 1 sync. connection
  */
-static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
+static inline int ip_vs_proc_sync_conn(struct netns_ipvs *ipvs, __u8 *p, __u8 *msg_end)
 {
 	struct ip_vs_sync_conn_options opt;
 	union  ip_vs_sync_conn *s;
@@ -1171,21 +1168,21 @@
 			state = 0;
 		}
 	}
-	if (ip_vs_conn_fill_param_sync(net, af, s, &param, pe_data,
+	if (ip_vs_conn_fill_param_sync(ipvs, af, s, &param, pe_data,
 				       pe_data_len, pe_name, pe_name_len)) {
 		retc = 50;
 		goto out;
 	}
 	/* If only IPv4, just silent skip IPv6 */
 	if (af == AF_INET)
-		ip_vs_proc_conn(net, &param, flags, state, s->v4.protocol, af,
+		ip_vs_proc_conn(ipvs, &param, flags, state, s->v4.protocol, af,
 				(union nf_inet_addr *)&s->v4.daddr, s->v4.dport,
 				ntohl(s->v4.timeout), ntohl(s->v4.fwmark),
 				(opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
 				);
 #ifdef CONFIG_IP_VS_IPV6
 	else
-		ip_vs_proc_conn(net, &param, flags, state, s->v6.protocol, af,
+		ip_vs_proc_conn(ipvs, &param, flags, state, s->v6.protocol, af,
 				(union nf_inet_addr *)&s->v6.daddr, s->v6.dport,
 				ntohl(s->v6.timeout), ntohl(s->v6.fwmark),
 				(opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
@@ -1204,10 +1201,9 @@
  *      ip_vs_conn entries.
  *      Handles Version 0 & 1
  */
-static void ip_vs_process_message(struct net *net, __u8 *buffer,
+static void ip_vs_process_message(struct netns_ipvs *ipvs, __u8 *buffer,
 				  const size_t buflen)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer;
 	__u8 *p, *msg_end;
 	int i, nr_conns;
@@ -1257,7 +1253,7 @@
 				return;
 			}
 			/* Process a single sync_conn */
-			retc = ip_vs_proc_sync_conn(net, p, msg_end);
+			retc = ip_vs_proc_sync_conn(ipvs, p, msg_end);
 			if (retc < 0) {
 				IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n",
 					     retc);
@@ -1268,7 +1264,7 @@
 		}
 	} else {
 		/* Old type of message */
-		ip_vs_process_message_v0(net, buffer, buflen);
+		ip_vs_process_message_v0(ipvs, buffer, buflen);
 		return;
 	}
 }
@@ -1493,16 +1489,15 @@
 /*
  *      Set up sending multicast socket over UDP
  */
-static struct socket *make_send_sock(struct net *net, int id)
+static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	/* multicast addr */
 	union ipvs_sockaddr mcast_addr;
 	struct socket *sock;
 	int result, salen;
 
 	/* First create a socket */
-	result = sock_create_kern(net, ipvs->mcfg.mcast_af, SOCK_DGRAM,
+	result = sock_create_kern(ipvs->net, ipvs->mcfg.mcast_af, SOCK_DGRAM,
 				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
@@ -1550,16 +1545,15 @@
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket *make_receive_sock(struct net *net, int id)
+static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	/* multicast addr */
 	union ipvs_sockaddr mcast_addr;
 	struct socket *sock;
 	int result, salen;
 
 	/* First create a socket */
-	result = sock_create_kern(net, ipvs->bcfg.mcast_af, SOCK_DGRAM,
+	result = sock_create_kern(ipvs->net, ipvs->bcfg.mcast_af, SOCK_DGRAM,
 				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
@@ -1687,7 +1681,7 @@
 static int sync_thread_master(void *data)
 {
 	struct ip_vs_sync_thread_data *tinfo = data;
-	struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
+	struct netns_ipvs *ipvs = tinfo->ipvs;
 	struct ipvs_master_sync_state *ms = &ipvs->ms[tinfo->id];
 	struct sock *sk = tinfo->sock->sk;
 	struct ip_vs_sync_buff *sb;
@@ -1743,7 +1737,7 @@
 static int sync_thread_backup(void *data)
 {
 	struct ip_vs_sync_thread_data *tinfo = data;
-	struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
+	struct netns_ipvs *ipvs = tinfo->ipvs;
 	int len;
 
 	pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
@@ -1765,7 +1759,7 @@
 				break;
 			}
 
-			ip_vs_process_message(tinfo->net, tinfo->buf, len);
+			ip_vs_process_message(ipvs, tinfo->buf, len);
 		}
 	}
 
@@ -1778,13 +1772,12 @@
 }
 
 
-int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *c,
+int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 		      int state)
 {
 	struct ip_vs_sync_thread_data *tinfo;
 	struct task_struct **array = NULL, *task;
 	struct socket *sock;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct net_device *dev;
 	char *name;
 	int (*threadfn)(void *data);
@@ -1811,7 +1804,7 @@
 	if (!c->mcast_ttl)
 		c->mcast_ttl = 1;
 
-	dev = __dev_get_by_name(net, c->mcast_ifn);
+	dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
 	if (!dev) {
 		pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
 		return -ENODEV;
@@ -1873,9 +1866,9 @@
 	tinfo = NULL;
 	for (id = 0; id < count; id++) {
 		if (state == IP_VS_STATE_MASTER)
-			sock = make_send_sock(net, id);
+			sock = make_send_sock(ipvs, id);
 		else
-			sock = make_receive_sock(net, id);
+			sock = make_receive_sock(ipvs, id);
 		if (IS_ERR(sock)) {
 			result = PTR_ERR(sock);
 			goto outtinfo;
@@ -1883,7 +1876,7 @@
 		tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
 		if (!tinfo)
 			goto outsocket;
-		tinfo->net = net;
+		tinfo->ipvs = ipvs;
 		tinfo->sock = sock;
 		if (state == IP_VS_STATE_BACKUP) {
 			tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
@@ -1947,9 +1940,8 @@
 }
 
 
-int stop_sync_thread(struct net *net, int state)
+int stop_sync_thread(struct netns_ipvs *ipvs, int state)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct task_struct **array;
 	int id;
 	int retc = -EINVAL;
@@ -2015,27 +2007,24 @@
 /*
  * Initialize data struct for each netns
  */
-int __net_init ip_vs_sync_net_init(struct net *net)
+int __net_init ip_vs_sync_net_init(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
 	__mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key);
 	spin_lock_init(&ipvs->sync_lock);
 	spin_lock_init(&ipvs->sync_buff_lock);
 	return 0;
 }
 
-void ip_vs_sync_net_cleanup(struct net *net)
+void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
 {
 	int retc;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	mutex_lock(&ipvs->sync_mutex);
-	retc = stop_sync_thread(net, IP_VS_STATE_MASTER);
+	retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER);
 	if (retc && retc != -ESRCH)
 		pr_err("Failed to stop Master Daemon\n");
 
-	retc = stop_sync_thread(net, IP_VS_STATE_BACKUP);
+	retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP);
 	if (retc && retc != -ESRCH)
 		pr_err("Failed to stop Backup Daemon\n");
 	mutex_unlock(&ipvs->sync_mutex);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index cc72990..3264cb49 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -212,19 +212,20 @@
 		ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
 }
 
-static inline bool ensure_mtu_is_adequate(int skb_af, int rt_mode,
+static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af,
+					  int rt_mode,
 					  struct ip_vs_iphdr *ipvsh,
 					  struct sk_buff *skb, int mtu)
 {
 #ifdef CONFIG_IP_VS_IPV6
 	if (skb_af == AF_INET6) {
-		struct net *net = dev_net(skb_dst(skb)->dev);
+		struct net *net = ipvs->net;
 
 		if (unlikely(__mtu_check_toobig_v6(skb, mtu))) {
 			if (!skb->dev)
 				skb->dev = net->loopback_dev;
 			/* only send ICMP too big on first fragment */
-			if (!ipvsh->fragoffs)
+			if (!ipvsh->fragoffs && !ip_vs_iph_icmp(ipvsh))
 				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 			IP_VS_DBG(1, "frag needed for %pI6c\n",
 				  &ipv6_hdr(skb)->saddr);
@@ -233,8 +234,6 @@
 	} else
 #endif
 	{
-		struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
-
 		/* If we're going to tunnel the packet and pmtu discovery
 		 * is disabled, we'll just fragment it anyway
 		 */
@@ -242,7 +241,8 @@
 			return true;
 
 		if (unlikely(ip_hdr(skb)->frag_off & htons(IP_DF) &&
-			     skb->len > mtu && !skb_is_gso(skb))) {
+			     skb->len > mtu && !skb_is_gso(skb) &&
+			     !ip_vs_iph_icmp(ipvsh))) {
 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 				  htonl(mtu));
 			IP_VS_DBG(1, "frag needed for %pI4\n",
@@ -256,11 +256,12 @@
 
 /* Get route to destination or remote server */
 static int
-__ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
+__ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
+		   struct ip_vs_dest *dest,
 		   __be32 daddr, int rt_mode, __be32 *ret_saddr,
 		   struct ip_vs_iphdr *ipvsh)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
+	struct net *net = ipvs->net;
 	struct ip_vs_dest_dst *dest_dst;
 	struct rtable *rt;			/* Route to the other host */
 	int mtu;
@@ -336,7 +337,7 @@
 		maybe_update_pmtu(skb_af, skb, mtu);
 	}
 
-	if (!ensure_mtu_is_adequate(skb_af, rt_mode, ipvsh, skb, mtu))
+	if (!ensure_mtu_is_adequate(ipvs, skb_af, rt_mode, ipvsh, skb, mtu))
 		goto err_put;
 
 	skb_dst_drop(skb);
@@ -402,11 +403,12 @@
  * Get route to destination or remote server
  */
 static int
-__ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
+__ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
+		      struct ip_vs_dest *dest,
 		      struct in6_addr *daddr, struct in6_addr *ret_saddr,
 		      struct ip_vs_iphdr *ipvsh, int do_xfrm, int rt_mode)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
+	struct net *net = ipvs->net;
 	struct ip_vs_dest_dst *dest_dst;
 	struct rt6_info *rt;			/* Route to the other host */
 	struct dst_entry *dst;
@@ -484,7 +486,7 @@
 		maybe_update_pmtu(skb_af, skb, mtu);
 	}
 
-	if (!ensure_mtu_is_adequate(skb_af, rt_mode, ipvsh, skb, mtu))
+	if (!ensure_mtu_is_adequate(ipvs, skb_af, rt_mode, ipvsh, skb, mtu))
 		goto err_put;
 
 	skb_dst_drop(skb);
@@ -573,8 +575,8 @@
 		skb_forward_csum(skb);
 		if (!skb->sk)
 			skb_sender_cpu_clear(skb);
-		NF_HOOK(pf, NF_INET_LOCAL_OUT, ip_vs_conn_net(cp), NULL, skb,
-			NULL, skb_dst(skb)->dev, dst_output_okfn);
+		NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
+			NULL, skb_dst(skb)->dev, dst_output);
 	} else
 		ret = NF_ACCEPT;
 
@@ -595,8 +597,8 @@
 		skb_forward_csum(skb);
 		if (!skb->sk)
 			skb_sender_cpu_clear(skb);
-		NF_HOOK(pf, NF_INET_LOCAL_OUT, ip_vs_conn_net(cp), NULL, skb,
-			NULL, skb_dst(skb)->dev, dst_output_okfn);
+		NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
+			NULL, skb_dst(skb)->dev, dst_output);
 	} else
 		ret = NF_ACCEPT;
 	return ret;
@@ -629,7 +631,7 @@
 	EnterFunction(10);
 
 	rcu_read_lock();
-	if (__ip_vs_get_out_rt(cp->af, skb, NULL, iph->daddr,
+	if (__ip_vs_get_out_rt(cp->ipvs, cp->af, skb, NULL, iph->daddr,
 			       IP_VS_RT_MODE_NON_LOCAL, NULL, ipvsh) < 0)
 		goto tx_error;
 
@@ -656,10 +658,13 @@
 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
 {
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+
 	EnterFunction(10);
 
 	rcu_read_lock();
-	if (__ip_vs_get_out_rt_v6(cp->af, skb, NULL, &ipvsh->daddr.in6, NULL,
+	if (__ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, NULL,
+				  &iph->daddr, NULL,
 				  ipvsh, 0, IP_VS_RT_MODE_NON_LOCAL) < 0)
 		goto tx_error;
 
@@ -706,7 +711,7 @@
 	}
 
 	was_input = rt_is_input_route(skb_rtable(skb));
-	local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip,
+	local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
 				   IP_VS_RT_MODE_LOCAL |
 				   IP_VS_RT_MODE_NON_LOCAL |
 				   IP_VS_RT_MODE_RDR, NULL, ipvsh);
@@ -723,7 +728,7 @@
 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
 		if (ct && !nf_ct_is_untracked(ct)) {
-			IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0,
+			IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, ipvsh->off,
 					 "ip_vs_nat_xmit(): "
 					 "stopping DNAT to local address");
 			goto tx_error;
@@ -733,8 +738,9 @@
 
 	/* From world but DNAT to loopback address? */
 	if (local && ipv4_is_loopback(cp->daddr.ip) && was_input) {
-		IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
-				 "stopping DNAT to loopback address");
+		IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, ipvsh->off,
+				 "ip_vs_nat_xmit(): stopping DNAT to loopback "
+				 "address");
 		goto tx_error;
 	}
 
@@ -751,7 +757,7 @@
 	ip_hdr(skb)->daddr = cp->daddr.ip;
 	ip_send_check(ip_hdr(skb));
 
-	IP_VS_DBG_PKT(10, AF_INET, pp, skb, 0, "After DNAT");
+	IP_VS_DBG_PKT(10, AF_INET, pp, skb, ipvsh->off, "After DNAT");
 
 	/* FIXME: when application helper enlarges the packet and the length
 	   is larger than the MTU of outgoing device, there will be still
@@ -794,7 +800,8 @@
 		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
 	}
 
-	local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
+	local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
+				      &cp->daddr.in6,
 				      NULL, ipvsh, 0,
 				      IP_VS_RT_MODE_LOCAL |
 				      IP_VS_RT_MODE_NON_LOCAL |
@@ -812,7 +819,7 @@
 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
 		if (ct && !nf_ct_is_untracked(ct)) {
-			IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0,
+			IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, ipvsh->off,
 					 "ip_vs_nat_xmit_v6(): "
 					 "stopping DNAT to local address");
 			goto tx_error;
@@ -823,7 +830,7 @@
 	/* From world but DNAT to loopback address? */
 	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
 	    ipv6_addr_type(&cp->daddr.in6) & IPV6_ADDR_LOOPBACK) {
-		IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0,
+		IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, ipvsh->off,
 				 "ip_vs_nat_xmit_v6(): "
 				 "stopping DNAT to loopback address");
 		goto tx_error;
@@ -841,7 +848,7 @@
 		goto tx_error;
 	ipv6_hdr(skb)->daddr = cp->daddr.in6;
 
-	IP_VS_DBG_PKT(10, AF_INET6, pp, skb, 0, "After DNAT");
+	IP_VS_DBG_PKT(10, AF_INET6, pp, skb, ipvsh->off, "After DNAT");
 
 	/* FIXME: when application helper enlarges the packet and the length
 	   is larger than the MTU of outgoing device, there will be still
@@ -967,8 +974,8 @@
 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
 {
-	struct net *net = skb_net(skb);
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct netns_ipvs *ipvs = cp->ipvs;
+	struct net *net = ipvs->net;
 	struct rtable *rt;			/* Route to the other host */
 	__be32 saddr;				/* Source for tunnel */
 	struct net_device *tdev;		/* Device to other host */
@@ -984,7 +991,7 @@
 	EnterFunction(10);
 
 	rcu_read_lock();
-	local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip,
+	local = __ip_vs_get_out_rt(ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
 				   IP_VS_RT_MODE_LOCAL |
 				   IP_VS_RT_MODE_NON_LOCAL |
 				   IP_VS_RT_MODE_CONNECT |
@@ -1042,7 +1049,7 @@
 
 	ret = ip_vs_tunnel_xmit_prepare(skb, cp);
 	if (ret == NF_ACCEPT)
-		ip_local_out(skb);
+		ip_local_out(net, skb->sk, skb);
 	else if (ret == NF_DROP)
 		kfree_skb(skb);
 	rcu_read_unlock();
@@ -1078,7 +1085,8 @@
 	EnterFunction(10);
 
 	rcu_read_lock();
-	local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
+	local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
+				      &cp->daddr.in6,
 				      &saddr, ipvsh, 1,
 				      IP_VS_RT_MODE_LOCAL |
 				      IP_VS_RT_MODE_NON_LOCAL |
@@ -1133,7 +1141,7 @@
 
 	ret = ip_vs_tunnel_xmit_prepare(skb, cp);
 	if (ret == NF_ACCEPT)
-		ip6_local_out(skb);
+		ip6_local_out(cp->ipvs->net, skb->sk, skb);
 	else if (ret == NF_DROP)
 		kfree_skb(skb);
 	rcu_read_unlock();
@@ -1165,7 +1173,7 @@
 	EnterFunction(10);
 
 	rcu_read_lock();
-	local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip,
+	local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
 				   IP_VS_RT_MODE_LOCAL |
 				   IP_VS_RT_MODE_NON_LOCAL |
 				   IP_VS_RT_MODE_KNOWN_NH, NULL, ipvsh);
@@ -1204,7 +1212,8 @@
 	EnterFunction(10);
 
 	rcu_read_lock();
-	local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
+	local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
+				      &cp->daddr.in6,
 				      NULL, ipvsh, 0,
 				      IP_VS_RT_MODE_LOCAL |
 				      IP_VS_RT_MODE_NON_LOCAL |
@@ -1273,7 +1282,7 @@
 		  IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
 		  IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
 	rcu_read_lock();
-	local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, rt_mode,
+	local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip, rt_mode,
 				   NULL, iph);
 	if (local < 0)
 		goto tx_error;
@@ -1365,8 +1374,8 @@
 		  IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
 		  IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
 	rcu_read_lock();
-	local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
-				      NULL, ipvsh, 0, rt_mode);
+	local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
+				      &cp->daddr.in6, NULL, ipvsh, 0, rt_mode);
 	if (local < 0)
 		goto tx_error;
 	rt = (struct rt6_info *) skb_dst(skb);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index c09d6c7..09d1d19 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -168,6 +168,7 @@
 		unsigned int dataoff,
 		u_int16_t l3num,
 		u_int8_t protonum,
+		struct net *net,
 		struct nf_conntrack_tuple *tuple,
 		const struct nf_conntrack_l3proto *l3proto,
 		const struct nf_conntrack_l4proto *l4proto)
@@ -181,12 +182,13 @@
 	tuple->dst.protonum = protonum;
 	tuple->dst.dir = IP_CT_DIR_ORIGINAL;
 
-	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
+	return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
 }
 EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
 
 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
-		       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
+		       u_int16_t l3num,
+		       struct net *net, struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_l3proto *l3proto;
 	struct nf_conntrack_l4proto *l4proto;
@@ -205,7 +207,7 @@
 
 	l4proto = __nf_ct_l4proto_find(l3num, protonum);
 
-	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
+	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
 			      l3proto, l4proto);
 
 	rcu_read_unlock();
@@ -1029,7 +1031,7 @@
 	u32 hash;
 
 	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
-			     dataoff, l3num, protonum, &tuple, l3proto,
+			     dataoff, l3num, protonum, net, &tuple, l3proto,
 			     l4proto)) {
 		pr_debug("resolve_normal_ct: Can't get tuple\n");
 		return NULL;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 6dd995c..fce1b1c 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -398,7 +398,7 @@
 }
 
 static bool dccp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
-			      struct nf_conntrack_tuple *tuple)
+			      struct net *net, struct nf_conntrack_tuple *tuple)
 {
 	struct dccp_hdr _hdr, *dh;
 
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 2281be4..86dc752 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -45,7 +45,7 @@
 
 static bool generic_pkt_to_tuple(const struct sk_buff *skb,
 				 unsigned int dataoff,
-				 struct nf_conntrack_tuple *tuple)
+				 struct net *net, struct nf_conntrack_tuple *tuple)
 {
 	tuple->src.u.all = 0;
 	tuple->dst.u.all = 0;
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 7648674..a96451a 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -190,9 +190,8 @@
 
 /* gre hdr info to tuple */
 static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
-			     struct nf_conntrack_tuple *tuple)
+			     struct net *net, struct nf_conntrack_tuple *tuple)
 {
-	struct net *net = dev_net(skb->dev ? skb->dev : skb_dst(skb)->dev);
 	const struct gre_hdr_pptp *pgrehdr;
 	struct gre_hdr_pptp _pgrehdr;
 	__be16 srckey;
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 6719773..9578a7c 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -156,7 +156,7 @@
 }
 
 static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
-			      struct nf_conntrack_tuple *tuple)
+			      struct net *net, struct nf_conntrack_tuple *tuple)
 {
 	const struct sctphdr *hp;
 	struct sctphdr _hdr;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 70383de..278f3b9 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -277,7 +277,7 @@
 }
 
 static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
-			     struct nf_conntrack_tuple *tuple)
+			     struct net *net, struct nf_conntrack_tuple *tuple)
 {
 	const struct tcphdr *hp;
 	struct tcphdr _hdr;
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 6957281..478f92f 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -38,6 +38,7 @@
 
 static bool udp_pkt_to_tuple(const struct sk_buff *skb,
 			     unsigned int dataoff,
+			     struct net *net,
 			     struct nf_conntrack_tuple *tuple)
 {
 	const struct udphdr *hp;
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index c5903d1..1ac8ee1 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -48,6 +48,7 @@
 
 static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
 				 unsigned int dataoff,
+				 struct net *net,
 				 struct nf_conntrack_tuple *tuple)
 {
 	const struct udphdr *hp;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 675d12c..a5d41df 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -107,12 +107,17 @@
 
 void nf_log_unregister(struct nf_logger *logger)
 {
+	const struct nf_logger *log;
 	int i;
 
 	mutex_lock(&nf_log_mutex);
-	for (i = 0; i < NFPROTO_NUMPROTO; i++)
-		RCU_INIT_POINTER(loggers[i][logger->type], NULL);
+	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
+		log = nft_log_dereference(loggers[i][logger->type]);
+		if (log == logger)
+			RCU_INIT_POINTER(loggers[i][logger->type], NULL);
+	}
 	mutex_unlock(&nf_log_mutex);
+	synchronize_rcu();
 }
 EXPORT_SYMBOL(nf_log_unregister);
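
The nf_log_unregister() change above fixes unregistration when several loggers of the same type coexist: instead of unconditionally clearing every per-family slot of that type, it clears a slot only if it still points at the logger being removed, then waits for an RCU grace period. A toy userspace sketch of just the matching logic follows; the registry array and logger type are made-up stand-ins, and plain assignment replaces the kernel's RCU_INIT_POINTER()/synchronize_rcu().

	#include <stdio.h>
	#include <stddef.h>

	#define NPROTO 4

	struct logger {
		const char *name;
	};

	/* One slot per protocol family; several families may share a logger. */
	static struct logger *loggers[NPROTO];

	static void logger_unregister(struct logger *logger)
	{
		int i;

		for (i = 0; i < NPROTO; i++) {
			/* Only clear slots that still point at *this* logger, so an
			 * unrelated logger registered for the same type survives. */
			if (loggers[i] == logger)
				loggers[i] = NULL;
		}
		/* The kernel additionally calls synchronize_rcu() here so in-flight
		 * readers finish before the caller frees the logger. */
	}

	int main(void)
	{
		struct logger a = { "logger-a" }, b = { "logger-b" };
		int i;

		loggers[0] = &a;
		loggers[1] = &b;	/* a different logger owns this slot */

		logger_unregister(&a);

		for (i = 0; i < NPROTO; i++)
			printf("slot %d: %s\n", i,
			       loggers[i] ? loggers[i]->name : "(none)");
		return 0;
	}
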
 
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 5113dfd..06a9f45 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -83,7 +83,7 @@
 	rcu_read_unlock();
 }
 
-int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family)
+int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
 {
 	struct flowi fl;
 	unsigned int hh_len;
@@ -99,7 +99,7 @@
 		dst = ((struct xfrm_dst *)dst)->route;
 	dst_hold(dst);
 
-	dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
+	dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
 	if (IS_ERR(dst))
 		return PTR_ERR(dst);
 
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 9f3c3c2..34f628e 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -199,7 +199,7 @@
 
 	if (verdict == NF_ACCEPT) {
 		afinfo = nf_get_afinfo(entry->state.pf);
-		if (!afinfo || afinfo->reroute(skb, entry) < 0)
+		if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0)
 			verdict = NF_DROP;
 	}
 
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 05d0b03..f3695a4 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -48,9 +48,7 @@
 			       const struct nft_chain *chain,
 			       int rulenum, enum nft_trace type)
 {
-	struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
-
-	nf_log_trace(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
+	nf_log_trace(pkt->net, pkt->pf, pkt->hook, pkt->skb, pkt->in,
 		     pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
 		     chain->table->name, chain->name, comments[type],
 		     rulenum);
@@ -111,10 +109,10 @@
 };
 
 unsigned int
-nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
+nft_do_chain(struct nft_pktinfo *pkt, void *priv)
 {
-	const struct nft_chain *chain = ops->priv, *basechain = chain;
-	const struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
+	const struct nft_chain *chain = priv, *basechain = chain;
+	const struct net *net = pkt->net;
 	const struct nft_rule *rule;
 	const struct nft_expr *expr, *last;
 	struct nft_regs regs;
diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c
index 2cae4d4..7b9c053 100644
--- a/net/netfilter/nf_tables_netdev.c
+++ b/net/netfilter/nf_tables_netdev.c
@@ -17,13 +17,13 @@
 
 static inline void
 nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
-			    const struct nf_hook_ops *ops, struct sk_buff *skb,
+			    struct sk_buff *skb,
 			    const struct nf_hook_state *state)
 {
 	struct iphdr *iph, _iph;
 	u32 len, thoff;
 
-	nft_set_pktinfo(pkt, ops, skb, state);
+	nft_set_pktinfo(pkt, skb, state);
 
 	iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
 				 &_iph);
@@ -48,7 +48,6 @@
 
 static inline void
 __nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
-			      const struct nf_hook_ops *ops,
 			      struct sk_buff *skb,
 			      const struct nf_hook_state *state)
 {
@@ -82,33 +81,32 @@
 }
 
 static inline void nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
-					       const struct nf_hook_ops *ops,
 					       struct sk_buff *skb,
 					       const struct nf_hook_state *state)
 {
-	nft_set_pktinfo(pkt, ops, skb, state);
-	__nft_netdev_set_pktinfo_ipv6(pkt, ops, skb, state);
+	nft_set_pktinfo(pkt, skb, state);
+	__nft_netdev_set_pktinfo_ipv6(pkt, skb, state);
 }
 
 static unsigned int
-nft_do_chain_netdev(const struct nf_hook_ops *ops, struct sk_buff *skb,
+nft_do_chain_netdev(void *priv, struct sk_buff *skb,
 		    const struct nf_hook_state *state)
 {
 	struct nft_pktinfo pkt;
 
 	switch (eth_hdr(skb)->h_proto) {
 	case htons(ETH_P_IP):
-		nft_netdev_set_pktinfo_ipv4(&pkt, ops, skb, state);
+		nft_netdev_set_pktinfo_ipv4(&pkt, skb, state);
 		break;
 	case htons(ETH_P_IPV6):
-		nft_netdev_set_pktinfo_ipv6(&pkt, ops, skb, state);
+		nft_netdev_set_pktinfo_ipv6(&pkt, skb, state);
 		break;
 	default:
-		nft_set_pktinfo(&pkt, ops, skb, state);
+		nft_set_pktinfo(&pkt, skb, state);
 		break;
 	}
 
-	return nft_do_chain(&pkt, ops);
+	return nft_do_chain(&pkt, priv);
 }
 
 static struct nft_af_info nft_af_netdev __read_mostly = {
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 70277b1..f1d9e88 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -64,7 +64,7 @@
 EXPORT_SYMBOL_GPL(nfnl_unlock);
 
 #ifdef CONFIG_PROVE_LOCKING
-int lockdep_nfnl_is_held(u8 subsys_id)
+bool lockdep_nfnl_is_held(u8 subsys_id)
 {
 	return lockdep_is_held(&table[subsys_id].mutex);
 }
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 4670821..cc2300f 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -538,9 +538,9 @@
 
 	if (skb->tstamp.tv64) {
 		struct nfulnl_msg_packet_timestamp ts;
-		struct timeval tv = ktime_to_timeval(skb->tstamp);
-		ts.sec = cpu_to_be64(tv.tv_sec);
-		ts.usec = cpu_to_be64(tv.tv_usec);
+		struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
+		ts.sec = cpu_to_be64(kts.tv_sec);
+		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
 
 		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
 			goto nla_put_failure;
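
The nfnetlink_log change above moves the packet timestamp from the y2038-unsafe struct timeval to timespec64 and derives the wire-format microseconds from tv_nsec. A minimal sketch of that conversion, with userspace struct timespec standing in for the kernel's timespec64 and plain assignment standing in for cpu_to_be64():

	#include <stdio.h>
	#include <stdint.h>
	#include <time.h>

	#define NSEC_PER_USEC 1000L

	struct packet_timestamp {	/* mirrors the two 64-bit fields sent on the wire */
		uint64_t sec;
		uint64_t usec;
	};

	static struct packet_timestamp to_wire_timestamp(struct timespec ts)
	{
		struct packet_timestamp out;

		out.sec  = (uint64_t)ts.tv_sec;
		out.usec = (uint64_t)(ts.tv_nsec / NSEC_PER_USEC);
		/* cpu_to_be64() would byte-swap both fields here on little-endian hosts. */
		return out;
	}

	int main(void)
	{
		struct timespec now;
		struct packet_timestamp ts;

		clock_gettime(CLOCK_REALTIME, &now);
		ts = to_wire_timestamp(now);
		printf("sec=%llu usec=%llu\n",
		       (unsigned long long)ts.sec, (unsigned long long)ts.usec);
		return 0;
	}
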
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 66def31..9c8fab0 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -619,6 +619,13 @@
 
 static struct nft_expr_type nft_match_type;
 
+static bool nft_match_cmp(const struct xt_match *match,
+			  const char *name, u32 rev, u32 family)
+{
+	return strcmp(match->name, name) == 0 && match->revision == rev &&
+	       (match->family == NFPROTO_UNSPEC || match->family == family);
+}
+
 static const struct nft_expr_ops *
 nft_match_select_ops(const struct nft_ctx *ctx,
 		     const struct nlattr * const tb[])
@@ -626,7 +633,7 @@
 	struct nft_xt *nft_match;
 	struct xt_match *match;
 	char *mt_name;
-	__u32 rev, family;
+	u32 rev, family;
 
 	if (tb[NFTA_MATCH_NAME] == NULL ||
 	    tb[NFTA_MATCH_REV] == NULL ||
@@ -641,8 +648,7 @@
 	list_for_each_entry(nft_match, &nft_match_list, head) {
 		struct xt_match *match = nft_match->ops.data;
 
-		if (strcmp(match->name, mt_name) == 0 &&
-		    match->revision == rev && match->family == family) {
+		if (nft_match_cmp(match, mt_name, rev, family)) {
 			if (!try_module_get(match->me))
 				return ERR_PTR(-ENOENT);
 
@@ -693,6 +699,13 @@
 
 static struct nft_expr_type nft_target_type;
 
+static bool nft_target_cmp(const struct xt_target *tg,
+			   const char *name, u32 rev, u32 family)
+{
+	return strcmp(tg->name, name) == 0 && tg->revision == rev &&
+	       (tg->family == NFPROTO_UNSPEC || tg->family == family);
+}
+
 static const struct nft_expr_ops *
 nft_target_select_ops(const struct nft_ctx *ctx,
 		      const struct nlattr * const tb[])
@@ -700,7 +713,7 @@
 	struct nft_xt *nft_target;
 	struct xt_target *target;
 	char *tg_name;
-	__u32 rev, family;
+	u32 rev, family;
 
 	if (tb[NFTA_TARGET_NAME] == NULL ||
 	    tb[NFTA_TARGET_REV] == NULL ||
@@ -715,8 +728,7 @@
 	list_for_each_entry(nft_target, &nft_target_list, head) {
 		struct xt_target *target = nft_target->ops.data;
 
-		if (strcmp(target->name, tg_name) == 0 &&
-		    target->revision == rev && target->family == family) {
+		if (nft_target_cmp(target, tg_name, rev, family)) {
 			if (!try_module_get(target->me))
 				return ERR_PTR(-ENOENT);
 
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index a13d6a3..319c22b 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -31,9 +31,8 @@
 			 const struct nft_pktinfo *pkt)
 {
 	const struct nft_log *priv = nft_expr_priv(expr);
-	struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
 
-	nf_log_packet(net, pkt->ops->pf, pkt->ops->hooknum, pkt->skb, pkt->in,
+	nf_log_packet(pkt->net, pkt->pf, pkt->hook, pkt->skb, pkt->in,
 		      pkt->out, &priv->loginfo, "%s", priv->prefix);
 }
 
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index cb2f13e..e4ad2c2 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -42,7 +42,7 @@
 		*(__be16 *)dest = skb->protocol;
 		break;
 	case NFT_META_NFPROTO:
-		*dest = pkt->ops->pf;
+		*dest = pkt->pf;
 		break;
 	case NFT_META_L4PROTO:
 		*dest = pkt->tprot;
@@ -135,7 +135,7 @@
 			break;
 		}
 
-		switch (pkt->ops->pf) {
+		switch (pkt->pf) {
 		case NFPROTO_IPV4:
 			if (ipv4_is_multicast(ip_hdr(skb)->daddr))
 				*dest = PACKET_MULTICAST;
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index 96805d2..61d216e 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -42,7 +42,7 @@
 			queue = priv->queuenum + cpu % priv->queues_total;
 		} else {
 			queue = nfqueue_hash(pkt->skb, queue,
-					     priv->queues_total, pkt->ops->pf,
+					     priv->queues_total, pkt->pf,
 					     jhash_initval);
 		}
 	}
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index 635dbba..759ca52 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -22,38 +22,37 @@
 				 const struct nft_pktinfo *pkt)
 {
 	struct nft_reject *priv = nft_expr_priv(expr);
-	struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
 
-	switch (pkt->ops->pf) {
+	switch (pkt->pf) {
 	case NFPROTO_IPV4:
 		switch (priv->type) {
 		case NFT_REJECT_ICMP_UNREACH:
 			nf_send_unreach(pkt->skb, priv->icmp_code,
-					pkt->ops->hooknum);
+					pkt->hook);
 			break;
 		case NFT_REJECT_TCP_RST:
-			nf_send_reset(pkt->skb, pkt->ops->hooknum);
+			nf_send_reset(pkt->net, pkt->skb, pkt->hook);
 			break;
 		case NFT_REJECT_ICMPX_UNREACH:
 			nf_send_unreach(pkt->skb,
 					nft_reject_icmp_code(priv->icmp_code),
-					pkt->ops->hooknum);
+					pkt->hook);
 			break;
 		}
 		break;
 	case NFPROTO_IPV6:
 		switch (priv->type) {
 		case NFT_REJECT_ICMP_UNREACH:
-			nf_send_unreach6(net, pkt->skb, priv->icmp_code,
-					 pkt->ops->hooknum);
+			nf_send_unreach6(pkt->net, pkt->skb, priv->icmp_code,
+					 pkt->hook);
 			break;
 		case NFT_REJECT_TCP_RST:
-			nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
+			nf_send_reset6(pkt->net, pkt->skb, pkt->hook);
 			break;
 		case NFT_REJECT_ICMPX_UNREACH:
-			nf_send_unreach6(net, pkt->skb,
+			nf_send_unreach6(pkt->net, pkt->skb,
 					 nft_reject_icmpv6_code(priv->icmp_code),
-					 pkt->ops->hooknum);
+					 pkt->hook);
 			break;
 		}
 		break;
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
index c13b794..1763ab8 100644
--- a/net/netfilter/xt_LOG.c
+++ b/net/netfilter/xt_LOG.c
@@ -33,7 +33,7 @@
 {
 	const struct xt_log_info *loginfo = par->targinfo;
 	struct nf_loginfo li;
-	struct net *net = dev_net(par->in ? par->in : par->out);
+	struct net *net = par->net;
 
 	li.type = NF_LOG_TYPE_LOG;
 	li.u.log.level = loginfo->level;
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index fb7497c..a1fa2c8 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -26,7 +26,7 @@
 {
 	const struct xt_nflog_info *info = par->targinfo;
 	struct nf_loginfo li;
-	struct net *net = dev_net(par->in ? par->in : par->out);
+	struct net *net = par->net;
 
 	li.type		     = NF_LOG_TYPE_ULOG;
 	li.u.ulog.copy_len   = info->len;
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 8c02501..b7c43de 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -108,7 +108,7 @@
 		return -1;
 
 	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
-		struct net *net = dev_net(par->in ? par->in : par->out);
+		struct net *net = par->net;
 		unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
 
 		if (dst_mtu(skb_dst(skb)) <= minlen) {
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index fd980aa..899b061 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -32,7 +32,7 @@
 {
 	const struct xt_tee_tginfo *info = par->targinfo;
 
-	nf_dup_ipv4(skb, par->hooknum, &info->gw.in, info->priv->oif);
+	nf_dup_ipv4(par->net, skb, par->hooknum, &info->gw.in, info->priv->oif);
 
 	return XT_CONTINUE;
 }
@@ -43,7 +43,7 @@
 {
 	const struct xt_tee_tginfo *info = par->targinfo;
 
-	nf_dup_ipv6(skb, par->hooknum, &info->gw.in6, info->priv->oif);
+	nf_dup_ipv6(par->net, skb, par->hooknum, &info->gw.in6, info->priv->oif);
 
 	return XT_CONTINUE;
 }
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index d0c96c5..3ab591e 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -250,8 +250,8 @@
  * no such listener is found, or NULL if the TCP header is incomplete.
  */
 static struct sock *
-tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport,
-			struct sock *sk)
+tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
+			 __be32 laddr, __be16 lport, struct sock *sk)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	struct tcphdr _hdr, *hp;
@@ -267,7 +267,7 @@
 		 * to a listener socket if there's one */
 		struct sock *sk2;
 
-		sk2 = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol,
+		sk2 = nf_tproxy_get_sock_v4(net, iph->protocol,
 					    iph->saddr, laddr ? laddr : iph->daddr,
 					    hp->source, lport ? lport : hp->dest,
 					    skb->dev, NFT_LOOKUP_LISTENER);
@@ -290,7 +290,7 @@
 }
 
 static unsigned int
-tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
+tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
 	   u_int32_t mark_mask, u_int32_t mark_value)
 {
 	const struct iphdr *iph = ip_hdr(skb);
@@ -305,7 +305,7 @@
 	 * addresses, this happens if the redirect already happened
 	 * and the current packet belongs to an already established
 	 * connection */
-	sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol,
+	sk = nf_tproxy_get_sock_v4(net, iph->protocol,
 				   iph->saddr, iph->daddr,
 				   hp->source, hp->dest,
 				   skb->dev, NFT_LOOKUP_ESTABLISHED);
@@ -317,11 +317,11 @@
 	/* UDP has no TCP_TIME_WAIT state, so we never enter here */
 	if (sk && sk->sk_state == TCP_TIME_WAIT)
 		/* reopening a TIME_WAIT connection needs special handling */
-		sk = tproxy_handle_time_wait4(skb, laddr, lport, sk);
+		sk = tproxy_handle_time_wait4(net, skb, laddr, lport, sk);
 	else if (!sk)
 		/* no, there's no established connection, check if
 		 * there's a listener on the redirected addr/port */
-		sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol,
+		sk = nf_tproxy_get_sock_v4(net, iph->protocol,
 					   iph->saddr, laddr,
 					   hp->source, lport,
 					   skb->dev, NFT_LOOKUP_LISTENER);
@@ -351,7 +351,7 @@
 {
 	const struct xt_tproxy_target_info *tgi = par->targinfo;
 
-	return tproxy_tg4(skb, tgi->laddr, tgi->lport, tgi->mark_mask, tgi->mark_value);
+	return tproxy_tg4(par->net, skb, tgi->laddr, tgi->lport, tgi->mark_mask, tgi->mark_value);
 }
 
 static unsigned int
@@ -359,7 +359,7 @@
 {
 	const struct xt_tproxy_target_info_v1 *tgi = par->targinfo;
 
-	return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value);
+	return tproxy_tg4(par->net, skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value);
 }
 
 #ifdef XT_TPROXY_HAVE_IPV6
@@ -429,7 +429,7 @@
 		 * to a listener socket if there's one */
 		struct sock *sk2;
 
-		sk2 = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
+		sk2 = nf_tproxy_get_sock_v6(par->net, tproto,
 					    &iph->saddr,
 					    tproxy_laddr6(skb, &tgi->laddr.in6, &iph->daddr),
 					    hp->source,
@@ -472,7 +472,7 @@
 	 * addresses, this happens if the redirect already happened
 	 * and the current packet belongs to an already established
 	 * connection */
-	sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
+	sk = nf_tproxy_get_sock_v6(par->net, tproto,
 				   &iph->saddr, &iph->daddr,
 				   hp->source, hp->dest,
 				   par->in, NFT_LOOKUP_ESTABLISHED);
@@ -487,7 +487,7 @@
 	else if (!sk)
 		/* no there's no established connection, check if
 		 * there's a listener on the redirected addr/port */
-		sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
+		sk = nf_tproxy_get_sock_v6(par->net, tproto,
 					   &iph->saddr, laddr,
 					   hp->source, lport,
 					   par->in, NFT_LOOKUP_LISTENER);
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 5b4743c..11d6091 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -125,7 +125,7 @@
 static bool
 addrtype_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
 {
-	struct net *net = dev_net(par->in ? par->in : par->out);
+	struct net *net = par->net;
 	const struct xt_addrtype_info *info = par->matchinfo;
 	const struct iphdr *iph = ip_hdr(skb);
 	bool ret = true;
@@ -143,7 +143,7 @@
 static bool
 addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
 {
-	struct net *net = dev_net(par->in ? par->in : par->out);
+	struct net *net = par->net;
 	const struct xt_addrtype_info_v1 *info = par->matchinfo;
 	const struct iphdr *iph;
 	const struct net_device *dev = NULL;
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 075d89d..99bbc82 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -317,7 +317,7 @@
 static bool
 connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
-	struct net *net = dev_net(par->in ? par->in : par->out);
+	struct net *net = par->net;
 	const struct xt_connlimit_info *info = par->matchinfo;
 	union nf_inet_addr addr;
 	struct nf_conntrack_tuple tuple;
@@ -332,7 +332,7 @@
 		tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
 		zone = nf_ct_zone(ct);
 	} else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
-				    par->family, &tuple)) {
+				      par->family, net, &tuple)) {
 		goto hotdrop;
 	}
 
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c
index 8d47c37..71a9d95 100644
--- a/net/netfilter/xt_ipvs.c
+++ b/net/netfilter/xt_ipvs.c
@@ -48,6 +48,7 @@
 ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_ipvs_mtinfo *data = par->matchinfo;
+	struct netns_ipvs *ipvs = net_ipvs(par->net);
 	/* ipvs_mt_check ensures that family is only NFPROTO_IPV[46]. */
 	const u_int8_t family = par->family;
 	struct ip_vs_iphdr iph;
@@ -67,7 +68,7 @@
 		goto out;
 	}
 
-	ip_vs_fill_iph_skb(family, skb, &iph);
+	ip_vs_fill_iph_skb(family, skb, true, &iph);
 
 	if (data->bitmask & XT_IPVS_PROTO)
 		if ((iph.protocol == data->l4proto) ^
@@ -85,7 +86,7 @@
 	/*
 	 * Check if the packet belongs to an existing entry
 	 */
-	cp = pp->conn_out_get(family, skb, &iph, 1 /* inverse */);
+	cp = pp->conn_out_get(ipvs, family, skb, &iph);
 	if (unlikely(cp == NULL)) {
 		match = false;
 		goto out;
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 0778855..df8801e 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -200,7 +200,7 @@
 	unsigned char opts[MAX_IPOPTLEN];
 	const struct xt_osf_finger *kf;
 	const struct xt_osf_user_finger *f;
-	struct net *net = dev_net(p->in ? p->in : p->out);
+	struct net *net = p->net;
 
 	if (!info)
 		return false;
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 45e1b30..d725a27 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -237,7 +237,7 @@
 static bool
 recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
-	struct net *net = dev_net(par->in ? par->in : par->out);
+	struct net *net = par->net;
 	struct recent_net *recent_net = recent_pernet(net);
 	const struct xt_recent_mtinfo_v1 *info = par->matchinfo;
 	struct recent_table *t;
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 43e26c8..2ec08f0 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -143,7 +143,8 @@
 	}
 }
 
-static struct sock *xt_socket_lookup_slow_v4(const struct sk_buff *skb,
+static struct sock *xt_socket_lookup_slow_v4(struct net *net,
+					     const struct sk_buff *skb,
 					     const struct net_device *indev)
 {
 	const struct iphdr *iph = ip_hdr(skb);
@@ -197,7 +198,7 @@
 	}
 #endif
 
-	return xt_socket_get_sock_v4(dev_net(skb->dev), protocol, saddr, daddr,
+	return xt_socket_get_sock_v4(net, protocol, saddr, daddr,
 				     sport, dport, indev);
 }
 
@@ -209,7 +210,7 @@
 	struct sock *sk = skb->sk;
 
 	if (!sk)
-		sk = xt_socket_lookup_slow_v4(skb, par->in);
+		sk = xt_socket_lookup_slow_v4(par->net, skb, par->in);
 	if (sk) {
 		bool wildcard;
 		bool transparent = true;
@@ -335,7 +336,8 @@
 	return NULL;
 }
 
-static struct sock *xt_socket_lookup_slow_v6(const struct sk_buff *skb,
+static struct sock *xt_socket_lookup_slow_v6(struct net *net,
+					     const struct sk_buff *skb,
 					     const struct net_device *indev)
 {
 	__be16 uninitialized_var(dport), uninitialized_var(sport);
@@ -371,7 +373,7 @@
 		return NULL;
 	}
 
-	return xt_socket_get_sock_v6(dev_net(skb->dev), tproto, saddr, daddr,
+	return xt_socket_get_sock_v6(net, tproto, saddr, daddr,
 				     sport, dport, indev);
 }
 
@@ -383,7 +385,7 @@
 	struct sock *sk = skb->sk;
 
 	if (!sk)
-		sk = xt_socket_lookup_slow_v6(skb, par->in);
+		sk = xt_socket_lookup_slow_v6(par->net, skb, par->in);
 	if (sk) {
 		bool wildcard;
 		bool transparent = true;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7f86d3b..8f060d7 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -125,6 +125,24 @@
 	return group ? 1 << (group - 1) : 0;
 }
 
+static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
+					   gfp_t gfp_mask)
+{
+	unsigned int len = skb_end_offset(skb);
+	struct sk_buff *new;
+
+	new = alloc_skb(len, gfp_mask);
+	if (new == NULL)
+		return NULL;
+
+	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
+	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
+	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
+
+	memcpy(skb_put(new, len), skb->data, len);
+	return new;
+}
+
 int netlink_add_tap(struct netlink_tap *nt)
 {
 	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
@@ -206,7 +224,11 @@
 	int ret = -ENOMEM;
 
 	dev_hold(dev);
-	nskb = skb_clone(skb, GFP_ATOMIC);
+
+	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
+		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
+	else
+		nskb = skb_clone(skb, GFP_ATOMIC);
 	if (nskb) {
 		nskb->dev = dev;
 		nskb->protocol = htons((u16) sk->sk_protocol);
@@ -279,11 +301,6 @@
 }
 
 #ifdef CONFIG_NETLINK_MMAP
-static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
-{
-	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
-}
-
 static bool netlink_rx_is_mmaped(struct sock *sk)
 {
 	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
@@ -846,7 +863,6 @@
 }
 
 #else /* CONFIG_NETLINK_MMAP */
-#define netlink_skb_is_mmaped(skb)	false
 #define netlink_rx_is_mmaped(sk)	false
 #define netlink_tx_is_mmaped(sk)	false
 #define netlink_mmap			sock_no_mmap
@@ -1094,8 +1110,8 @@
 
 	lock_sock(sk);
 
-	err = -EBUSY;
-	if (nlk_sk(sk)->portid)
+	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
+	if (nlk_sk(sk)->bound)
 		goto err;
 
 	err = -ENOMEM;
@@ -1115,10 +1131,14 @@
 			err = -EOVERFLOW;
 		if (err == -EEXIST)
 			err = -EADDRINUSE;
-		nlk_sk(sk)->portid = 0;
 		sock_put(sk);
+		goto err;
 	}
 
+	/* We need to ensure that the socket is hashed and visible. */
+	smp_wmb();
+	nlk_sk(sk)->bound = portid;
+
 err:
 	release_sock(sk);
 	return err;
@@ -1503,6 +1523,7 @@
 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
 	int err;
 	long unsigned int groups = nladdr->nl_groups;
+	bool bound;
 
 	if (addr_len < sizeof(struct sockaddr_nl))
 		return -EINVAL;
@@ -1519,9 +1540,14 @@
 			return err;
 	}
 
-	if (nlk->portid)
+	bound = nlk->bound;
+	if (bound) {
+		/* Ensure nlk->portid is up-to-date. */
+		smp_rmb();
+
 		if (nladdr->nl_pid != nlk->portid)
 			return -EINVAL;
+	}
 
 	if (nlk->netlink_bind && groups) {
 		int group;
@@ -1537,7 +1563,10 @@
 		}
 	}
 
-	if (!nlk->portid) {
+	/* No need for barriers here as we return to user-space without
+	 * using any of the bound attributes.
+	 */
+	if (!bound) {
 		err = nladdr->nl_pid ?
 			netlink_insert(sk, nladdr->nl_pid) :
 			netlink_autobind(sock);
@@ -1585,7 +1614,10 @@
 	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
 		return -EPERM;
 
-	if (!nlk->portid)
+	/* No need for barriers here as we return to user-space without
+	 * using any of the bound attributes.
+	 */
+	if (!nlk->bound)
 		err = netlink_autobind(sock);
 
 	if (err == 0) {
@@ -2426,10 +2458,13 @@
 		dst_group = nlk->dst_group;
 	}
 
-	if (!nlk->portid) {
+	if (!nlk->bound) {
 		err = netlink_autobind(sock);
 		if (err)
 			goto out;
+	} else {
+		/* Ensure nlk is hashed and visible. */
+		smp_rmb();
 	}
 
 	/* It's a really convoluted way for userland to ask for mmaped
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 8900840..14437d9 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -35,6 +35,7 @@
 	unsigned long		state;
 	size_t			max_recvmsg_len;
 	wait_queue_head_t	wait;
+	bool			bound;
 	bool			cb_running;
 	struct netlink_callback	cb;
 	struct mutex		*cb_mutex;
@@ -59,6 +60,15 @@
 	return container_of(sk, struct netlink_sock, sk);
 }
 
+static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NETLINK_MMAP
+	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
+#else
+	return false;
+#endif /* CONFIG_NETLINK_MMAP */
+}
+
 struct netlink_table {
 	struct rhashtable	hash;
 	struct hlist_head	mc_list;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 2ed5f96..bc0e504 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -39,7 +39,7 @@
 EXPORT_SYMBOL(genl_unlock);
 
 #ifdef CONFIG_LOCKDEP
-int lockdep_genl_is_held(void)
+bool lockdep_genl_is_held(void)
 {
 	return lockdep_is_held(&genl_mutex);
 }
@@ -1136,19 +1136,19 @@
 }
 EXPORT_SYMBOL(genlmsg_multicast_allns);
 
-void genl_notify(struct genl_family *family,
-		 struct sk_buff *skb, struct net *net, u32 portid, u32 group,
-		 struct nlmsghdr *nlh, gfp_t flags)
+void genl_notify(struct genl_family *family, struct sk_buff *skb,
+		 struct genl_info *info, u32 group, gfp_t flags)
 {
+	struct net *net = genl_info_net(info);
 	struct sock *sk = net->genl_sock;
 	int report = 0;
 
-	if (nlh)
-		report = nlmsg_report(nlh);
+	if (info->nlhdr)
+		report = nlmsg_report(info->nlhdr);
 
 	if (WARN_ON_ONCE(group >= family->n_mcgrps))
 		return;
 	group = family->mcgrp_offset + group;
-	nlmsg_notify(sk, skb, portid, group, report, flags);
+	nlmsg_notify(sk, skb, info->snd_portid, group, report, flags);
 }
 EXPORT_SYMBOL(genl_notify);
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 2a071f4..d143aa9 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -5,7 +5,8 @@
 config OPENVSWITCH
 	tristate "Open vSwitch"
 	depends on INET
-	depends on (!NF_CONNTRACK || NF_CONNTRACK)
+	depends on !NF_CONNTRACK || \
+		   (NF_CONNTRACK && (!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6))
 	select LIBCRC32C
 	select MPLS
 	select NET_MPLS_GSO
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 315f533..1d21ab9 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -620,7 +620,7 @@
 	return 0;
 }
 
-static int ovs_vport_output(struct sock *sock, struct sk_buff *skb)
+static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
 	struct vport *vport = data->vport;
@@ -679,8 +679,8 @@
 	skb_pull(skb, hlen);
 }
 
-static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
-			 __be16 ethertype)
+static void ovs_fragment(struct net *net, struct vport *vport,
+			 struct sk_buff *skb, u16 mru, __be16 ethertype)
 {
 	if (skb_network_offset(skb) > MAX_L2_LEN) {
 		OVS_NLERR(1, "L2 header too long to fragment");
@@ -700,7 +700,7 @@
 		skb_dst_set_noref(skb, &ovs_dst);
 		IPCB(skb)->frag_max_size = mru;
 
-		ip_do_fragment(skb->sk, skb, ovs_vport_output);
+		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
 		refdst_drop(orig_dst);
 	} else if (ethertype == htons(ETH_P_IPV6)) {
 		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
@@ -722,7 +722,7 @@
 		skb_dst_set_noref(skb, &ovs_rt.dst);
 		IP6CB(skb)->frag_max_size = mru;
 
-		v6ops->fragment(skb->sk, skb, ovs_vport_output);
+		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
 		refdst_drop(orig_dst);
 	} else {
 		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
@@ -743,6 +743,7 @@
 		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
 			ovs_vport_send(vport, skb);
 		} else if (mru <= vport->dev->mtu) {
+			struct net *net = read_pnet(&dp->net);
 			__be16 ethertype = key->eth.type;
 
 			if (!is_flow_key_valid(key)) {
@@ -752,7 +753,7 @@
 					ethertype = vlan_get_protocol(skb);
 			}
 
-			ovs_fragment(vport, skb, mru, ethertype);
+			ovs_fragment(net, vport, skb, mru, ethertype);
 		} else {
 			kfree_skb(skb);
 		}
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index e8e524a..ad61426 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -275,13 +275,15 @@
 	case NFPROTO_IPV6: {
 		u8 nexthdr = ipv6_hdr(skb)->nexthdr;
 		__be16 frag_off;
+		int ofs;
 
-		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
-					   &nexthdr, &frag_off);
-		if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
+		ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+				       &frag_off);
+		if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
 			pr_debug("proto header not found\n");
 			return NF_ACCEPT;
 		}
+		protoff = ofs;
 		break;
 	}
 	default:
@@ -302,7 +304,7 @@
 		int err;
 
 		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
-		err = ip_defrag(skb, user);
+		err = ip_defrag(net, skb, user);
 		if (err)
 			return err;
 
@@ -313,7 +315,7 @@
 		struct sk_buff *reasm;
 
 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
-		reasm = nf_ct_frag6_gather(skb, user);
+		reasm = nf_ct_frag6_gather(net, skb, user);
 		if (!reasm)
 			return -EINPROGRESS;
 
@@ -345,7 +347,7 @@
 {
 	struct nf_conntrack_tuple tuple;
 
-	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, &tuple))
+	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple))
 		return NULL;
 	return __nf_ct_expect_find(net, zone, &tuple);
 }
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 6fbd2de..a758280 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -91,8 +91,7 @@
 static void ovs_notify(struct genl_family *family,
 		       struct sk_buff *skb, struct genl_info *info)
 {
-	genl_notify(family, skb, genl_info_net(info), info->snd_portid,
-		    0, info->nlhdr, GFP_KERNEL);
+	genl_notify(family, skb, info, 0, GFP_KERNEL);
 }
 
 /**
@@ -952,7 +951,7 @@
 	if (error)
 		goto err_kfree_flow;
 
-	ovs_flow_mask_key(&new_flow->key, &key, &mask);
+	ovs_flow_mask_key(&new_flow->key, &key, true, &mask);
 
 	/* Extract flow identifier. */
 	error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
@@ -1080,7 +1079,7 @@
 	struct sw_flow_key masked_key;
 	int error;
 
-	ovs_flow_mask_key(&masked_key, key, mask);
+	ovs_flow_mask_key(&masked_key, key, true, mask);
 	error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
 	if (error) {
 		OVS_NLERR(log,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index c8db44a..0ea128e 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -698,8 +698,7 @@
 {
 	/* Extract metadata from packet. */
 	if (tun_info) {
-		if (ip_tunnel_info_af(tun_info) != AF_INET)
-			return -EINVAL;
+		key->tun_proto = ip_tunnel_info_af(tun_info);
 		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));
 
 		if (tun_info->options_len) {
@@ -714,6 +713,7 @@
 			key->tun_opts_len = 0;
 		}
 	} else  {
+		key->tun_proto = 0;
 		key->tun_opts_len = 0;
 		memset(&key->tun_key, 0, sizeof(key->tun_key));
 	}
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index fe527d2..5688e33 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -63,6 +63,7 @@
 		u32	skb_mark;	/* SKB mark. */
 		u16	in_port;	/* Input switch port (or DP_MAX_PORTS). */
 	} __packed phy; /* Safe when right after 'tun_key'. */
+	u8 tun_proto;			/* Protocol of encapsulating tunnel. */
 	u32 ovs_flow_hash;		/* Datapath computed hash value.  */
 	u32 recirc_id;			/* Recirculation ID.  */
 	struct {
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index c92d6a2..77850f1 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -57,6 +57,7 @@
 };
 
 #define OVS_ATTR_NESTED -1
+#define OVS_ATTR_VARIABLE -2
 
 static void update_range(struct sw_flow_match *match,
 			 size_t offset, size_t size, bool is_mask)
@@ -261,8 +262,8 @@
 	 * updating this function.
 	 */
 	return    nla_total_size(8)    /* OVS_TUNNEL_KEY_ATTR_ID */
-		+ nla_total_size(4)    /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
-		+ nla_total_size(4)    /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
+		+ nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */
+		+ nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */
 		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TOS */
 		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TTL */
 		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
@@ -304,6 +305,10 @@
 		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
 }
 
+static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = {
+	[OVS_VXLAN_EXT_GBP]	    = { .len = sizeof(u32) },
+};
+
 static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
 	[OVS_TUNNEL_KEY_ATTR_ID]	    = { .len = sizeof(u64) },
 	[OVS_TUNNEL_KEY_ATTR_IPV4_SRC]	    = { .len = sizeof(u32) },
@@ -315,8 +320,11 @@
 	[OVS_TUNNEL_KEY_ATTR_TP_SRC]	    = { .len = sizeof(u16) },
 	[OVS_TUNNEL_KEY_ATTR_TP_DST]	    = { .len = sizeof(u16) },
 	[OVS_TUNNEL_KEY_ATTR_OAM]	    = { .len = 0 },
-	[OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS]   = { .len = OVS_ATTR_NESTED },
-	[OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS]    = { .len = OVS_ATTR_NESTED },
+	[OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS]   = { .len = OVS_ATTR_VARIABLE },
+	[OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS]    = { .len = OVS_ATTR_NESTED,
+						.next = ovs_vxlan_ext_key_lens },
+	[OVS_TUNNEL_KEY_ATTR_IPV6_SRC]      = { .len = sizeof(struct in6_addr) },
+	[OVS_TUNNEL_KEY_ATTR_IPV6_DST]      = { .len = sizeof(struct in6_addr) },
 };
 
 /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
@@ -349,6 +357,13 @@
 	[OVS_KEY_ATTR_CT_LABEL]	 = { .len = sizeof(struct ovs_key_ct_label) },
 };
 
+static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
+{
+	return expected_len == attr_len ||
+	       expected_len == OVS_ATTR_NESTED ||
+	       expected_len == OVS_ATTR_VARIABLE;
+}
+
 static bool is_all_zero(const u8 *fp, size_t size)
 {
 	int i;
@@ -388,7 +403,7 @@
 		}
 
 		expected_len = ovs_key_lens[type].len;
-		if (nla_len(nla) != expected_len && expected_len != OVS_ATTR_NESTED) {
+		if (!check_attr_len(nla_len(nla), expected_len)) {
 			OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
 				  type, nla_len(nla), expected_len);
 			return -EINVAL;
@@ -473,29 +488,50 @@
 	return 0;
 }
 
-static const struct nla_policy vxlan_opt_policy[OVS_VXLAN_EXT_MAX + 1] = {
-	[OVS_VXLAN_EXT_GBP]	= { .type = NLA_U32 },
-};
-
-static int vxlan_tun_opt_from_nlattr(const struct nlattr *a,
+static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
 				     struct sw_flow_match *match, bool is_mask,
 				     bool log)
 {
-	struct nlattr *tb[OVS_VXLAN_EXT_MAX+1];
+	struct nlattr *a;
+	int rem;
 	unsigned long opt_key_offset;
 	struct vxlan_metadata opts;
-	int err;
 
 	BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
 
-	err = nla_parse_nested(tb, OVS_VXLAN_EXT_MAX, a, vxlan_opt_policy);
-	if (err < 0)
-		return err;
-
 	memset(&opts, 0, sizeof(opts));
+	nla_for_each_nested(a, attr, rem) {
+		int type = nla_type(a);
 
-	if (tb[OVS_VXLAN_EXT_GBP])
-		opts.gbp = nla_get_u32(tb[OVS_VXLAN_EXT_GBP]);
+		if (type > OVS_VXLAN_EXT_MAX) {
+			OVS_NLERR(log, "VXLAN extension %d out of range max %d",
+				  type, OVS_VXLAN_EXT_MAX);
+			return -EINVAL;
+		}
+
+		if (!check_attr_len(nla_len(a),
+				    ovs_vxlan_ext_key_lens[type].len)) {
+			OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d",
+				  type, nla_len(a),
+				  ovs_vxlan_ext_key_lens[type].len);
+			return -EINVAL;
+		}
+
+		switch (type) {
+		case OVS_VXLAN_EXT_GBP:
+			opts.gbp = nla_get_u32(a);
+			break;
+		default:
+			OVS_NLERR(log, "Unknown VXLAN extension attribute %d",
+				  type);
+			return -EINVAL;
+		}
+	}
+	if (rem) {
+		OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.",
+			  rem);
+		return -EINVAL;
+	}
 
 	if (!is_mask)
 		SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
@@ -508,14 +544,14 @@
 	return 0;
 }
 
-static int ipv4_tun_from_nlattr(const struct nlattr *attr,
-				struct sw_flow_match *match, bool is_mask,
-				bool log)
+static int ip_tun_from_nlattr(const struct nlattr *attr,
+			      struct sw_flow_match *match, bool is_mask,
+			      bool log)
 {
 	struct nlattr *a;
 	int rem;
 	bool ttl = false;
-	__be16 tun_flags = 0;
+	__be16 tun_flags = 0, ipv4 = false, ipv6 = false;
 	int opts_type = 0;
 
 	nla_for_each_nested(a, attr, rem) {
@@ -528,8 +564,8 @@
 			return -EINVAL;
 		}
 
-		if (ovs_tunnel_key_lens[type].len != nla_len(a) &&
-		    ovs_tunnel_key_lens[type].len != OVS_ATTR_NESTED) {
+		if (!check_attr_len(nla_len(a),
+				    ovs_tunnel_key_lens[type].len)) {
 			OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
 				  type, nla_len(a), ovs_tunnel_key_lens[type].len);
 			return -EINVAL;
@@ -544,10 +580,22 @@
 		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
 			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
 					nla_get_in_addr(a), is_mask);
+			ipv4 = true;
 			break;
 		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
 			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
 					nla_get_in_addr(a), is_mask);
+			ipv4 = true;
+			break;
+		case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
+			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
+					nla_get_in6_addr(a), is_mask);
+			ipv6 = true;
+			break;
+		case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
+			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
+					nla_get_in6_addr(a), is_mask);
+			ipv6 = true;
 			break;
 		case OVS_TUNNEL_KEY_ATTR_TOS:
 			SW_FLOW_KEY_PUT(match, tun_key.tos,
@@ -602,28 +650,46 @@
 			opts_type = type;
 			break;
 		default:
-			OVS_NLERR(log, "Unknown IPv4 tunnel attribute %d",
+			OVS_NLERR(log, "Unknown IP tunnel attribute %d",
 				  type);
 			return -EINVAL;
 		}
 	}
 
 	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
+	if (is_mask)
+		SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true);
+	else
+		SW_FLOW_KEY_PUT(match, tun_proto, ipv6 ? AF_INET6 : AF_INET,
+				false);
 
 	if (rem > 0) {
-		OVS_NLERR(log, "IPv4 tunnel attribute has %d unknown bytes.",
+		OVS_NLERR(log, "IP tunnel attribute has %d unknown bytes.",
 			  rem);
 		return -EINVAL;
 	}
 
+	if (ipv4 && ipv6) {
+		OVS_NLERR(log, "Mixed IPv4 and IPv6 tunnel attributes");
+		return -EINVAL;
+	}
+
 	if (!is_mask) {
-		if (!match->key->tun_key.u.ipv4.dst) {
+		if (!ipv4 && !ipv6) {
+			OVS_NLERR(log, "IP tunnel dst address not specified");
+			return -EINVAL;
+		}
+		if (ipv4 && !match->key->tun_key.u.ipv4.dst) {
 			OVS_NLERR(log, "IPv4 tunnel dst address is zero");
 			return -EINVAL;
 		}
+		if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) {
+			OVS_NLERR(log, "IPv6 tunnel dst address is zero");
+			return -EINVAL;
+		}
 
 		if (!ttl) {
-			OVS_NLERR(log, "IPv4 tunnel TTL not specified.");
+			OVS_NLERR(log, "IP tunnel TTL not specified.");
 			return -EINVAL;
 		}
 	}
@@ -648,21 +714,36 @@
 	return 0;
 }
 
-static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
-				const struct ip_tunnel_key *output,
-				const void *tun_opts, int swkey_tun_opts_len)
+static int __ip_tun_to_nlattr(struct sk_buff *skb,
+			      const struct ip_tunnel_key *output,
+			      const void *tun_opts, int swkey_tun_opts_len,
+			      unsigned short tun_proto)
 {
 	if (output->tun_flags & TUNNEL_KEY &&
 	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
 		return -EMSGSIZE;
-	if (output->u.ipv4.src &&
-	    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
-			    output->u.ipv4.src))
-		return -EMSGSIZE;
-	if (output->u.ipv4.dst &&
-	    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
-			    output->u.ipv4.dst))
-		return -EMSGSIZE;
+	switch (tun_proto) {
+	case AF_INET:
+		if (output->u.ipv4.src &&
+		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
+				    output->u.ipv4.src))
+			return -EMSGSIZE;
+		if (output->u.ipv4.dst &&
+		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
+				    output->u.ipv4.dst))
+			return -EMSGSIZE;
+		break;
+	case AF_INET6:
+		if (!ipv6_addr_any(&output->u.ipv6.src) &&
+		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
+				     &output->u.ipv6.src))
+			return -EMSGSIZE;
+		if (!ipv6_addr_any(&output->u.ipv6.dst) &&
+		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
+				     &output->u.ipv6.dst))
+			return -EMSGSIZE;
+		break;
+	}
 	if (output->tos &&
 	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
 		return -EMSGSIZE;
@@ -696,9 +777,10 @@
 	return 0;
 }
 
-static int ipv4_tun_to_nlattr(struct sk_buff *skb,
-			      const struct ip_tunnel_key *output,
-			      const void *tun_opts, int swkey_tun_opts_len)
+static int ip_tun_to_nlattr(struct sk_buff *skb,
+			    const struct ip_tunnel_key *output,
+			    const void *tun_opts, int swkey_tun_opts_len,
+			    unsigned short tun_proto)
 {
 	struct nlattr *nla;
 	int err;
@@ -707,7 +789,8 @@
 	if (!nla)
 		return -EMSGSIZE;
 
-	err = __ipv4_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len);
+	err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len,
+				 tun_proto);
 	if (err)
 		return err;
 
@@ -719,9 +802,10 @@
 				  const struct ip_tunnel_info *egress_tun_info,
 				  const void *egress_tun_opts)
 {
-	return __ipv4_tun_to_nlattr(skb, &egress_tun_info->key,
-				    egress_tun_opts,
-				    egress_tun_info->options_len);
+	return __ip_tun_to_nlattr(skb, &egress_tun_info->key,
+				  egress_tun_opts,
+				  egress_tun_info->options_len,
+				  ip_tunnel_info_af(egress_tun_info));
 }
 
 static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
@@ -772,8 +856,8 @@
 		*attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
 	}
 	if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
-		if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
-					 is_mask, log) < 0)
+		if (ip_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
+				       is_mask, log) < 0)
 			return -EINVAL;
 		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
 	}
@@ -1052,10 +1136,13 @@
 
 	/* The nlattr stream should already have been validated */
 	nla_for_each_nested(nla, attr, rem) {
-		if (tbl && tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
-			nlattr_set(nla, val, tbl[nla_type(nla)].next);
-		else
+		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
+			if (tbl[nla_type(nla)].next)
+				tbl = tbl[nla_type(nla)].next;
+			nlattr_set(nla, val, tbl);
+		} else {
 			memset(nla_data(nla), val, nla_len(nla));
+		}
 	}
 }
 
@@ -1157,7 +1244,7 @@
 			/* The userspace does not send tunnel attributes that
 			 * are 0, but we should not wildcard them nonetheless.
 			 */
-			if (match->key->tun_key.u.ipv4.dst)
+			if (match->key->tun_proto)
 				SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
 							 0xff, true);
 
@@ -1330,14 +1417,14 @@
 	if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
 		goto nla_put_failure;
 
-	if ((swkey->tun_key.u.ipv4.dst || is_mask)) {
+	if ((swkey->tun_proto || is_mask)) {
 		const void *opts = NULL;
 
 		if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
 			opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
 
-		if (ipv4_tun_to_nlattr(skb, &output->tun_key, opts,
-				       swkey->tun_opts_len))
+		if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
+				     swkey->tun_opts_len, swkey->tun_proto))
 			goto nla_put_failure;
 	}
 
@@ -1840,7 +1927,7 @@
 	int err = 0, start, opts_type;
 
 	ovs_match_init(&match, &key, NULL);
-	opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
+	opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
 	if (opts_type < 0)
 		return opts_type;
 
@@ -1876,6 +1963,8 @@
 
 	tun_info = &tun_dst->u.tun_info;
 	tun_info->mode = IP_TUNNEL_INFO_TX;
+	if (key.tun_proto == AF_INET6)
+		tun_info->mode |= IP_TUNNEL_INFO_IPV6;
 	tun_info->key = key.tun_key;
 
 	/* We need to store the options in the action itself since
@@ -1922,8 +2011,7 @@
 		key_len /= 2;
 
 	if (key_type > OVS_KEY_ATTR_MAX ||
-	    (ovs_key_lens[key_type].len != key_len &&
-	     ovs_key_lens[key_type].len != OVS_ATTR_NESTED))
+	    !check_attr_len(key_len, ovs_key_lens[key_type].len))
 		return -EINVAL;
 
 	if (masked && !validate_masked(nla_data(ovs_key), key_len))
@@ -2338,10 +2426,11 @@
 		if (!start)
 			return -EMSGSIZE;
 
-		err = ipv4_tun_to_nlattr(skb, &tun_info->key,
-					 tun_info->options_len ?
+		err = ip_tun_to_nlattr(skb, &tun_info->key,
+				       tun_info->options_len ?
 					     ip_tunnel_info_opts(tun_info) : NULL,
-					 tun_info->options_len);
+				       tun_info->options_len,
+				       ip_tunnel_info_af(tun_info));
 		if (err)
 			return err;
 		nla_nest_end(skb, start);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index d22d8e9..95dbced 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -57,20 +57,21 @@
 }
 
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
-		       const struct sw_flow_mask *mask)
+		       bool full, const struct sw_flow_mask *mask)
 {
-	const long *m = (const long *)((const u8 *)&mask->key +
-				mask->range.start);
-	const long *s = (const long *)((const u8 *)src +
-				mask->range.start);
-	long *d = (long *)((u8 *)dst + mask->range.start);
+	int start = full ? 0 : mask->range.start;
+	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
+	const long *m = (const long *)((const u8 *)&mask->key + start);
+	const long *s = (const long *)((const u8 *)src + start);
+	long *d = (long *)((u8 *)dst + start);
 	int i;
 
-	/* The memory outside of the 'mask->range' are not set since
-	 * further operations on 'dst' only uses contents within
-	 * 'mask->range'.
+	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
+	 * if 'full' is false the memory outside of the 'mask->range' is left
+	 * uninitialized. This can be used as an optimization when further
+	 * operations on 'dst' only use contents within 'mask->range'.
 	 */
-	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
+	for (i = 0; i < len; i += sizeof(long))
 		*d++ = *s++ & *m++;
 }
 
@@ -426,7 +427,7 @@
 
 static int flow_key_start(const struct sw_flow_key *key)
 {
-	if (key->tun_key.u.ipv4.dst)
+	if (key->tun_proto)
 		return 0;
 	else
 		return rounddown(offsetof(struct sw_flow_key, phy),
@@ -475,7 +476,7 @@
 	u32 hash;
 	struct sw_flow_key masked_key;
 
-	ovs_flow_mask_key(&masked_key, unmasked, mask);
+	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
 	hash = flow_hash(&masked_key, &mask->range);
 	head = find_bucket(ti, hash);
 	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 616eda1..2dd9900 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -86,5 +86,5 @@
 bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *);
 
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
-		       const struct sw_flow_mask *mask);
+		       bool full, const struct sw_flow_mask *mask);
 #endif /* flow_table.h */
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index c11413d..fb3cdb8 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -151,7 +151,8 @@
 {
 	struct vxlan_dev *vxlan = netdev_priv(vport->dev);
 	struct net *net = ovs_dp_get_net(vport->dp);
-	__be16 dst_port = vxlan_dev_dst_port(vxlan);
+	unsigned short family = ip_tunnel_info_af(upcall->egress_tun_info);
+	__be16 dst_port = vxlan_dev_dst_port(vxlan, family);
 	__be16 src_port;
 	int port_min;
 	int port_max;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 7b8e39a..691660b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -230,6 +230,8 @@
 	} sa;
 };
 
+#define vio_le() virtio_legacy_is_little_endian()
+
 #define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 
 #define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
@@ -1421,7 +1423,7 @@
 	rcu_read_lock();
 	prog = rcu_dereference(f->bpf_prog);
 	if (prog)
-		ret = BPF_PROG_RUN(prog, skb) % num;
+		ret = bpf_prog_run_clear_cb(prog, skb) % num;
 	rcu_read_unlock();
 
 	return ret;
@@ -1437,17 +1439,17 @@
 {
 	struct packet_fanout *f = pt->af_packet_priv;
 	unsigned int num = READ_ONCE(f->num_members);
+	struct net *net = read_pnet(&f->net);
 	struct packet_sock *po;
 	unsigned int idx;
 
-	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
-	    !num) {
+	if (!net_eq(dev_net(dev), net) || !num) {
 		kfree_skb(skb);
 		return 0;
 	}
 
 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
-		skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
+		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
 		if (!skb)
 			return 0;
 	}
@@ -1517,10 +1519,10 @@
 
 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
 {
-	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
-		return true;
+	if (sk->sk_family != PF_PACKET)
+		return false;
 
-	return false;
+	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
 }
 
 static void fanout_init_data(struct packet_fanout *f)
@@ -1565,7 +1567,7 @@
 	if (copy_from_user(&fprog, data, len))
 		return -EFAULT;
 
-	ret = bpf_prog_create_from_user(&new, &fprog, NULL);
+	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
 	if (ret)
 		return ret;
 
@@ -1937,16 +1939,16 @@
 	return err;
 }
 
-static unsigned int run_filter(const struct sk_buff *skb,
-				      const struct sock *sk,
-				      unsigned int res)
+static unsigned int run_filter(struct sk_buff *skb,
+			       const struct sock *sk,
+			       unsigned int res)
 {
 	struct sk_filter *filter;
 
 	rcu_read_lock();
 	filter = rcu_dereference(sk->sk_filter);
 	if (filter != NULL)
-		res = SK_RUN_FILTER(filter, skb);
+		res = bpf_prog_run_clear_cb(filter->prog, skb);
 	rcu_read_unlock();
 
 	return res;
@@ -2628,6 +2630,7 @@
 	__be16 proto;
 	unsigned char *addr;
 	int err, reserve = 0;
+	struct sockcm_cookie sockc;
 	struct virtio_net_hdr vnet_hdr = { 0 };
 	int offset = 0;
 	int vnet_hdr_len;
@@ -2663,6 +2666,13 @@
 	if (unlikely(!(dev->flags & IFF_UP)))
 		goto out_unlock;
 
+	sockc.mark = sk->sk_mark;
+	if (msg->msg_controllen) {
+		err = sock_cmsg_send(sk, msg, &sockc);
+		if (unlikely(err))
+			goto out_unlock;
+	}
+
 	if (sock->type == SOCK_RAW)
 		reserve = dev->hard_header_len;
 	if (po->has_vnet_hdr) {
@@ -2680,15 +2690,15 @@
 			goto out_unlock;
 
 		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
-		    (__virtio16_to_cpu(false, vnet_hdr.csum_start) +
-		     __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 >
-		      __virtio16_to_cpu(false, vnet_hdr.hdr_len)))
-			vnet_hdr.hdr_len = __cpu_to_virtio16(false,
-				 __virtio16_to_cpu(false, vnet_hdr.csum_start) +
-				__virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2);
+		    (__virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
+		     __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2 >
+		      __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len)))
+			vnet_hdr.hdr_len = __cpu_to_virtio16(vio_le(),
+				 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
+				__virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2);
 
 		err = -EINVAL;
-		if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len)
+		if (__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len) > len)
 			goto out_unlock;
 
 		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
@@ -2731,7 +2741,7 @@
 	hlen = LL_RESERVED_SPACE(dev);
 	tlen = dev->needed_tailroom;
 	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
-			       __virtio16_to_cpu(false, vnet_hdr.hdr_len),
+			       __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
 			       msg->msg_flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto out_unlock;
@@ -2772,14 +2782,14 @@
 	skb->protocol = proto;
 	skb->dev = dev;
 	skb->priority = sk->sk_priority;
-	skb->mark = sk->sk_mark;
+	skb->mark = sockc.mark;
 
 	packet_pick_tx_queue(dev, skb);
 
 	if (po->has_vnet_hdr) {
 		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
-			u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start);
-			u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset);
+			u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start);
+			u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset);
 			if (!skb_partial_csum_set(skb, s, o)) {
 				err = -EINVAL;
 				goto out_free;
@@ -2787,7 +2797,7 @@
 		}
 
 		skb_shinfo(skb)->gso_size =
-			__virtio16_to_cpu(false, vnet_hdr.gso_size);
+			__virtio16_to_cpu(vio_le(), vnet_hdr.gso_size);
 		skb_shinfo(skb)->gso_type = gso_type;
 
 		/* Header must be checked, and gso_segs computed. */
@@ -3161,9 +3171,9 @@
 
 			/* This is a hint as to how much should be linear. */
 			vnet_hdr.hdr_len =
-				__cpu_to_virtio16(false, skb_headlen(skb));
+				__cpu_to_virtio16(vio_le(), skb_headlen(skb));
 			vnet_hdr.gso_size =
-				__cpu_to_virtio16(false, sinfo->gso_size);
+				__cpu_to_virtio16(vio_le(), sinfo->gso_size);
 			if (sinfo->gso_type & SKB_GSO_TCPV4)
 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
 			else if (sinfo->gso_type & SKB_GSO_TCPV6)
@@ -3181,9 +3191,9 @@
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			vnet_hdr.csum_start = __cpu_to_virtio16(false,
+			vnet_hdr.csum_start = __cpu_to_virtio16(vio_le(),
 					  skb_checksum_start_offset(skb));
-			vnet_hdr.csum_offset = __cpu_to_virtio16(false,
+			vnet_hdr.csum_offset = __cpu_to_virtio16(vio_le(),
 							 skb->csum_offset);
 		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 			vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index a2f28a6..384ea1e 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -72,13 +72,7 @@
 	rds_clear_recv_queue(rs);
 	rds_cong_remove_socket(rs);
 
-	/*
-	 * the binding lookup hash uses rcu, we need to
-	 * make sure we synchronize_rcu before we free our
-	 * entry
-	 */
 	rds_remove_bound(rs);
-	synchronize_rcu();
 
 	rds_send_drop_to(rs, NULL);
 	rds_rdma_drop_keys(rs);
@@ -588,6 +582,8 @@
 {
 	int ret;
 
+	rds_bind_lock_init();
+
 	ret = rds_conn_init();
 	if (ret)
 		goto out;
diff --git a/net/rds/bind.c b/net/rds/bind.c
index dd666fb..bc6b93e 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -38,48 +38,50 @@
 #include <linux/ratelimit.h>
 #include "rds.h"
 
-#define BIND_HASH_SIZE 1024
-static struct hlist_head bind_hash_table[BIND_HASH_SIZE];
-static DEFINE_SPINLOCK(rds_bind_lock);
+struct bind_bucket {
+	rwlock_t                lock;
+	struct hlist_head	head;
+};
 
-static struct hlist_head *hash_to_bucket(__be32 addr, __be16 port)
+#define BIND_HASH_SIZE 1024
+static struct bind_bucket bind_hash_table[BIND_HASH_SIZE];
+
+static struct bind_bucket *hash_to_bucket(__be32 addr, __be16 port)
 {
 	return bind_hash_table + (jhash_2words((u32)addr, (u32)port, 0) &
 				  (BIND_HASH_SIZE - 1));
 }
 
-static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
+/* must hold either read or write lock (write lock for insert != NULL) */
+static struct rds_sock *rds_bind_lookup(struct bind_bucket *bucket,
+					__be32 addr, __be16 port,
 					struct rds_sock *insert)
 {
 	struct rds_sock *rs;
-	struct hlist_head *head = hash_to_bucket(addr, port);
+	struct hlist_head *head = &bucket->head;
 	u64 cmp;
 	u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
 
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(rs, head, rs_bound_node) {
+	hlist_for_each_entry(rs, head, rs_bound_node) {
 		cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
 		      be16_to_cpu(rs->rs_bound_port);
 
 		if (cmp == needle) {
-			rcu_read_unlock();
+			rds_sock_addref(rs);
 			return rs;
 		}
 	}
-	rcu_read_unlock();
 
 	if (insert) {
 		/*
 		 * make sure our addr and port are set before
-		 * we are added to the list, other people
-		 * in rcu will find us as soon as the
-		 * hlist_add_head_rcu is done
+		 * we are added to the list.
 		 */
 		insert->rs_bound_addr = addr;
 		insert->rs_bound_port = port;
 		rds_sock_addref(insert);
 
-		hlist_add_head_rcu(&insert->rs_bound_node, head);
+		hlist_add_head(&insert->rs_bound_node, head);
 	}
 	return NULL;
 }
@@ -93,16 +95,21 @@
 struct rds_sock *rds_find_bound(__be32 addr, __be16 port)
 {
 	struct rds_sock *rs;
+	unsigned long flags;
+	struct bind_bucket *bucket = hash_to_bucket(addr, port);
 
-	rs = rds_bind_lookup(addr, port, NULL);
+	read_lock_irqsave(&bucket->lock, flags);
+	rs = rds_bind_lookup(bucket, addr, port, NULL);
+	read_unlock_irqrestore(&bucket->lock, flags);
 
-	if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
-		rds_sock_addref(rs);
-	else
+	if (rs && sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) {
+		rds_sock_put(rs);
 		rs = NULL;
+	}
 
 	rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr,
 		ntohs(port));
+
 	return rs;
 }
 
@@ -112,6 +119,7 @@
 	unsigned long flags;
 	int ret = -EADDRINUSE;
 	u16 rover, last;
+	struct bind_bucket *bucket;
 
 	if (*port != 0) {
 		rover = be16_to_cpu(*port);
@@ -121,42 +129,48 @@
 		last = rover - 1;
 	}
 
-	spin_lock_irqsave(&rds_bind_lock, flags);
-
 	do {
+		struct rds_sock *rrs;
 		if (rover == 0)
 			rover++;
-		if (!rds_bind_lookup(addr, cpu_to_be16(rover), rs)) {
+
+		bucket = hash_to_bucket(addr, cpu_to_be16(rover));
+		write_lock_irqsave(&bucket->lock, flags);
+		rrs = rds_bind_lookup(bucket, addr, cpu_to_be16(rover), rs);
+		write_unlock_irqrestore(&bucket->lock, flags);
+		if (!rrs) {
 			*port = rs->rs_bound_port;
 			ret = 0;
 			rdsdebug("rs %p binding to %pI4:%d\n",
 			  rs, &addr, (int)ntohs(*port));
 			break;
+		} else {
+			rds_sock_put(rrs);
 		}
 	} while (rover++ != last);
 
-	spin_unlock_irqrestore(&rds_bind_lock, flags);
-
 	return ret;
 }
 
 void rds_remove_bound(struct rds_sock *rs)
 {
 	unsigned long flags;
+	struct bind_bucket *bucket =
+		hash_to_bucket(rs->rs_bound_addr, rs->rs_bound_port);
 
-	spin_lock_irqsave(&rds_bind_lock, flags);
+	write_lock_irqsave(&bucket->lock, flags);
 
 	if (rs->rs_bound_addr) {
 		rdsdebug("rs %p unbinding from %pI4:%d\n",
 		  rs, &rs->rs_bound_addr,
 		  ntohs(rs->rs_bound_port));
 
-		hlist_del_init_rcu(&rs->rs_bound_node);
+		hlist_del_init(&rs->rs_bound_node);
 		rds_sock_put(rs);
 		rs->rs_bound_addr = 0;
 	}
 
-	spin_unlock_irqrestore(&rds_bind_lock, flags);
+	write_unlock_irqrestore(&bucket->lock, flags);
 }
 
 int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
@@ -200,9 +214,13 @@
 
 out:
 	release_sock(sk);
-
-	/* we might have called rds_remove_bound on error */
-	if (ret)
-		synchronize_rcu();
 	return ret;
 }
+
+void rds_bind_lock_init(void)
+{
+	int i;
+
+	for (i = 0; i < BIND_HASH_SIZE; i++)
+		rwlock_init(&bind_hash_table[i].lock);
+}
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 49adeef..d456403 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -128,10 +128,7 @@
 	struct rds_transport *loop_trans;
 	unsigned long flags;
 	int ret;
-	struct rds_transport *otrans = trans;
 
-	if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
-		goto new_conn;
 	rcu_read_lock();
 	conn = rds_conn_lookup(net, head, laddr, faddr, trans);
 	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
@@ -147,7 +144,6 @@
 	if (conn)
 		goto out;
 
-new_conn:
 	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
 	if (!conn) {
 		conn = ERR_PTR(-ENOMEM);
@@ -207,6 +203,7 @@
 
 	atomic_set(&conn->c_state, RDS_CONN_DOWN);
 	conn->c_send_gen = 0;
+	conn->c_outgoing = (is_outgoing ? 1 : 0);
 	conn->c_reconnect_jiffies = 0;
 	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
 	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
@@ -243,22 +240,13 @@
 		/* Creating normal conn */
 		struct rds_connection *found;
 
-		if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
-			found = NULL;
-		else
-			found = rds_conn_lookup(net, head, laddr, faddr, trans);
+		found = rds_conn_lookup(net, head, laddr, faddr, trans);
 		if (found) {
 			trans->conn_free(conn->c_transport_data);
 			kmem_cache_free(rds_conn_slab, conn);
 			conn = found;
 		} else {
-			if ((is_outgoing && otrans->t_type == RDS_TRANS_TCP) ||
-			    (otrans->t_type != RDS_TRANS_TCP)) {
-				/* Only the active side should be added to
-				 * reconnect list for TCP.
-				 */
-				hlist_add_head_rcu(&conn->c_hash_node, head);
-			}
+			hlist_add_head_rcu(&conn->c_hash_node, head);
 			rds_cong_add_conn(conn);
 			rds_conn_count++;
 		}
@@ -337,7 +325,9 @@
 	rcu_read_lock();
 	if (!hlist_unhashed(&conn->c_hash_node)) {
 		rcu_read_unlock();
-		rds_queue_reconnect(conn);
+		if (conn->c_trans->t_type != RDS_TRANS_TCP ||
+		    conn->c_outgoing == 1)
+			rds_queue_reconnect(conn);
 	} else {
 		rcu_read_unlock();
 	}
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 2d3f2ab..a833ab7 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -43,14 +43,14 @@
 #include "rds.h"
 #include "ib.h"
 
-static unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE;
-unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */
+unsigned int rds_ib_fmr_1m_pool_size = RDS_FMR_1M_POOL_SIZE;
+unsigned int rds_ib_fmr_8k_pool_size = RDS_FMR_8K_POOL_SIZE;
 unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
 
-module_param(fmr_pool_size, int, 0444);
-MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA");
-module_param(fmr_message_size, int, 0444);
-MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer");
+module_param(rds_ib_fmr_1m_pool_size, int, 0444);
+MODULE_PARM_DESC(rds_ib_fmr_1m_pool_size, " Max number of 1M fmr per HCA");
+module_param(rds_ib_fmr_8k_pool_size, int, 0444);
+MODULE_PARM_DESC(rds_ib_fmr_8k_pool_size, " Max number of 8K fmr per HCA");
 module_param(rds_ib_retry_count, int, 0444);
 MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
 
@@ -97,8 +97,10 @@
 	struct rds_ib_device *rds_ibdev = container_of(work,
 					struct rds_ib_device, free_work);
 
-	if (rds_ibdev->mr_pool)
-		rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
+	if (rds_ibdev->mr_8k_pool)
+		rds_ib_destroy_mr_pool(rds_ibdev->mr_8k_pool);
+	if (rds_ibdev->mr_1m_pool)
+		rds_ib_destroy_mr_pool(rds_ibdev->mr_1m_pool);
 	if (rds_ibdev->pd)
 		ib_dealloc_pd(rds_ibdev->pd);
 
@@ -148,9 +150,13 @@
 	rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);
 
 	rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32;
-	rds_ibdev->max_fmrs = dev_attr->max_fmr ?
-			min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
-			fmr_pool_size;
+	rds_ibdev->max_1m_fmrs = dev_attr->max_mr ?
+		min_t(unsigned int, (dev_attr->max_mr / 2),
+		      rds_ib_fmr_1m_pool_size) : rds_ib_fmr_1m_pool_size;
+
+	rds_ibdev->max_8k_fmrs = dev_attr->max_mr ?
+		min_t(unsigned int, ((dev_attr->max_mr / 2) * RDS_MR_8K_SCALE),
+		      rds_ib_fmr_8k_pool_size) : rds_ib_fmr_8k_pool_size;
 
 	rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom;
 	rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom;
@@ -162,12 +168,25 @@
 		goto put_dev;
 	}
 
-	rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
-	if (IS_ERR(rds_ibdev->mr_pool)) {
-		rds_ibdev->mr_pool = NULL;
+	rds_ibdev->mr_1m_pool =
+		rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL);
+	if (IS_ERR(rds_ibdev->mr_1m_pool)) {
+		rds_ibdev->mr_1m_pool = NULL;
 		goto put_dev;
 	}
 
+	rds_ibdev->mr_8k_pool =
+		rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_8K_POOL);
+	if (IS_ERR(rds_ibdev->mr_8k_pool)) {
+		rds_ibdev->mr_8k_pool = NULL;
+		goto put_dev;
+	}
+
+	rdsdebug("RDS/IB: max_mr = %d, max_wrs = %d, max_sge = %d, fmr_max_remaps = %d, max_1m_fmrs = %d, max_8k_fmrs = %d\n",
+		 dev_attr->max_fmr, rds_ibdev->max_wrs, rds_ibdev->max_sge,
+		 rds_ibdev->fmr_max_remaps, rds_ibdev->max_1m_fmrs,
+		 rds_ibdev->max_8k_fmrs);
+
 	INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
 	INIT_LIST_HEAD(&rds_ibdev->conn_list);
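
To make the per-device sizing above concrete, take a hypothetical HCA reporting
max_mr = 4096 (the value is made up; the formulas and defaults come from this hunk
and the ib.h macros below, integer arithmetic throughout):

	max_1m_fmrs = min(4096 / 2, rds_ib_fmr_1m_pool_size)
	            = min(2048, 4096)                       = 2048
	max_8k_fmrs = min((4096 / 2) * RDS_MR_8K_SCALE, rds_ib_fmr_8k_pool_size)
	            = min(2048 * 85, 348160)                = 174080

MR-constrained hardware therefore scales both pools down proportionally, while larger
devices are simply capped at the module-parameter defaults (4096 and 348160).
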
 
diff --git a/net/rds/ib.h b/net/rds/ib.h
index aae60fd..f17d095 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -9,8 +9,11 @@
 #include "rds.h"
 #include "rdma_transport.h"
 
-#define RDS_FMR_SIZE			256
-#define RDS_FMR_POOL_SIZE		8192
+#define RDS_FMR_1M_POOL_SIZE		(8192 / 2)
+#define RDS_FMR_1M_MSG_SIZE		256
+#define RDS_FMR_8K_MSG_SIZE		2
+#define RDS_MR_8K_SCALE			(256 / (RDS_FMR_8K_MSG_SIZE + 1))
+#define RDS_FMR_8K_POOL_SIZE		(RDS_MR_8K_SCALE * (8192 / 2))
 
 #define RDS_IB_MAX_SGE			8
 #define RDS_IB_RECV_SGE 		2
@@ -24,6 +27,9 @@
 
 #define RDS_IB_RECYCLE_BATCH_COUNT	32
 
+#define RDS_IB_WC_MAX			32
+#define RDS_IB_SEND_OP			BIT_ULL(63)
+
 extern struct rw_semaphore rds_ib_devices_lock;
 extern struct list_head rds_ib_devices;
 
@@ -89,6 +95,20 @@
 	atomic_t	w_free_ctr;
 };
 
+/* Rings are posted with all the allocations they'll need to queue the
+ * incoming message to the receiving socket so this can't fail.
+ * All fragments start with a header, so we can make sure we're not receiving
+ * garbage, and we can tell a small 8 byte fragment from an ACK frame.
+ */
+struct rds_ib_ack_state {
+	u64		ack_next;
+	u64		ack_recv;
+	unsigned int	ack_required:1;
+	unsigned int	ack_next_valid:1;
+	unsigned int	ack_recv_valid:1;
+};
+
+
 struct rds_ib_device;
 
 struct rds_ib_connection {
@@ -102,6 +122,12 @@
 	struct ib_pd		*i_pd;
 	struct ib_cq		*i_send_cq;
 	struct ib_cq		*i_recv_cq;
+	struct ib_wc		i_send_wc[RDS_IB_WC_MAX];
+	struct ib_wc		i_recv_wc[RDS_IB_WC_MAX];
+
+	/* interrupt handling */
+	struct tasklet_struct	i_send_tasklet;
+	struct tasklet_struct	i_recv_tasklet;
 
 	/* tx */
 	struct rds_ib_work_ring	i_send_ring;
@@ -112,7 +138,6 @@
 	atomic_t		i_signaled_sends;
 
 	/* rx */
-	struct tasklet_struct	i_recv_tasklet;
 	struct mutex		i_recv_mutex;
 	struct rds_ib_work_ring	i_recv_ring;
 	struct rds_ib_incoming	*i_ibinc;
@@ -164,6 +189,12 @@
 struct rds_ib_ipaddr {
 	struct list_head	list;
 	__be32			ipaddr;
+	struct rcu_head		rcu;
+};
+
+enum {
+	RDS_IB_MR_8K_POOL,
+	RDS_IB_MR_1M_POOL,
 };
 
 struct rds_ib_device {
@@ -172,9 +203,12 @@
 	struct list_head	conn_list;
 	struct ib_device	*dev;
 	struct ib_pd		*pd;
-	struct rds_ib_mr_pool	*mr_pool;
-	unsigned int		fmr_max_remaps;
 	unsigned int		max_fmrs;
+	struct rds_ib_mr_pool	*mr_1m_pool;
+	struct rds_ib_mr_pool   *mr_8k_pool;
+	unsigned int		fmr_max_remaps;
+	unsigned int		max_8k_fmrs;
+	unsigned int		max_1m_fmrs;
 	int			max_sge;
 	unsigned int		max_wrs;
 	unsigned int		max_initiator_depth;
@@ -197,14 +231,14 @@
 struct rds_ib_statistics {
 	uint64_t	s_ib_connect_raced;
 	uint64_t	s_ib_listen_closed_stale;
-	uint64_t	s_ib_tx_cq_call;
+	uint64_t	s_ib_evt_handler_call;
+	uint64_t	s_ib_tasklet_call;
 	uint64_t	s_ib_tx_cq_event;
 	uint64_t	s_ib_tx_ring_full;
 	uint64_t	s_ib_tx_throttle;
 	uint64_t	s_ib_tx_sg_mapping_failure;
 	uint64_t	s_ib_tx_stalled;
 	uint64_t	s_ib_tx_credit_updates;
-	uint64_t	s_ib_rx_cq_call;
 	uint64_t	s_ib_rx_cq_event;
 	uint64_t	s_ib_rx_ring_empty;
 	uint64_t	s_ib_rx_refill_from_cq;
@@ -216,12 +250,18 @@
 	uint64_t	s_ib_ack_send_delayed;
 	uint64_t	s_ib_ack_send_piggybacked;
 	uint64_t	s_ib_ack_received;
-	uint64_t	s_ib_rdma_mr_alloc;
-	uint64_t	s_ib_rdma_mr_free;
-	uint64_t	s_ib_rdma_mr_used;
-	uint64_t	s_ib_rdma_mr_pool_flush;
-	uint64_t	s_ib_rdma_mr_pool_wait;
-	uint64_t	s_ib_rdma_mr_pool_depleted;
+	uint64_t	s_ib_rdma_mr_8k_alloc;
+	uint64_t	s_ib_rdma_mr_8k_free;
+	uint64_t	s_ib_rdma_mr_8k_used;
+	uint64_t	s_ib_rdma_mr_8k_pool_flush;
+	uint64_t	s_ib_rdma_mr_8k_pool_wait;
+	uint64_t	s_ib_rdma_mr_8k_pool_depleted;
+	uint64_t	s_ib_rdma_mr_1m_alloc;
+	uint64_t	s_ib_rdma_mr_1m_free;
+	uint64_t	s_ib_rdma_mr_1m_used;
+	uint64_t	s_ib_rdma_mr_1m_pool_flush;
+	uint64_t	s_ib_rdma_mr_1m_pool_wait;
+	uint64_t	s_ib_rdma_mr_1m_pool_depleted;
 	uint64_t	s_ib_atomic_cswp;
 	uint64_t	s_ib_atomic_fadd;
 };
@@ -273,7 +313,8 @@
 void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
 extern struct ib_client rds_ib_client;
 
-extern unsigned int fmr_message_size;
+extern unsigned int rds_ib_fmr_1m_pool_size;
+extern unsigned int rds_ib_fmr_8k_pool_size;
 extern unsigned int rds_ib_retry_count;
 
 extern spinlock_t ib_nodev_conns_lock;
@@ -303,7 +344,8 @@
 void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
 void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
 void rds_ib_destroy_nodev_conns(void);
-struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
+struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev,
+					     int npages);
 void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
@@ -323,7 +365,8 @@
 void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
 void rds_ib_inc_free(struct rds_incoming *inc);
 int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
-void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
+void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
+			     struct rds_ib_ack_state *state);
 void rds_ib_recv_tasklet_fn(unsigned long data);
 void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
 void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
@@ -331,6 +374,7 @@
 void rds_ib_attempt_ack(struct rds_ib_connection *ic);
 void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
 u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
+void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);
 
 /* ib_ring.c */
 void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
@@ -348,7 +392,7 @@
 void rds_ib_xmit_complete(struct rds_connection *conn);
 int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 		unsigned int hdr_off, unsigned int sg, unsigned int off);
-void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
+void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
 void rds_ib_send_init_ring(struct rds_ib_connection *ic);
 void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
 int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
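
Two of the new constants are worth expanding.  RDS_MR_8K_SCALE works out to
256 / (2 + 1) = 85, so RDS_FMR_8K_POOL_SIZE = 85 * (8192 / 2) = 348160 entries versus
RDS_FMR_1M_POOL_SIZE = 4096: an 8K MR maps only a couple of pages, so many more of
them fit in the same registration budget.  RDS_IB_SEND_OP is simply bit 63 of the
work-request id: the ring index lives in the low bits, the bit is ORed in when a send
WR is posted, and the shared completion path uses it to dispatch, roughly:

	/* posting (ib_send.c): tag this wr_id as a send operation */
	send->s_wr.wr_id = i | RDS_IB_SEND_OP;

	/* completion dispatch (ib_cm.c): route by the tag, strip it before use */
	if (wc->wr_id & RDS_IB_SEND_OP)
		rds_ib_send_cqe_handler(ic, wc);	/* ring lookup uses wr_id & ~RDS_IB_SEND_OP */
	else
		rds_ib_recv_cqe_handler(ic, wc, &ack_state);
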
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 9043f5c..2b2370e 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -216,6 +216,96 @@
 		 event->event, ib_event_msg(event->event), data);
 }
 
+/* Plucking the oldest entry from the ring can be done concurrently with
+ * the thread refilling the ring.  Each ring operation is protected by
+ * spinlocks and the transient state of refilling doesn't change the
+ * recording of which entry is oldest.
+ *
+ * This relies on IB only calling one cq comp_handler for each cq so that
+ * there will only be one caller of rds_recv_incoming() per RDS connection.
+ */
+static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
+{
+	struct rds_connection *conn = context;
+	struct rds_ib_connection *ic = conn->c_transport_data;
+
+	rdsdebug("conn %p cq %p\n", conn, cq);
+
+	rds_ib_stats_inc(s_ib_evt_handler_call);
+
+	tasklet_schedule(&ic->i_recv_tasklet);
+}
+
+static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq,
+		    struct ib_wc *wcs,
+		    struct rds_ib_ack_state *ack_state)
+{
+	int nr;
+	int i;
+	struct ib_wc *wc;
+
+	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
+		for (i = 0; i < nr; i++) {
+			wc = wcs + i;
+			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
+				 (unsigned long long)wc->wr_id, wc->status,
+				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));
+
+			if (wc->wr_id & RDS_IB_SEND_OP)
+				rds_ib_send_cqe_handler(ic, wc);
+			else
+				rds_ib_recv_cqe_handler(ic, wc, ack_state);
+		}
+	}
+}
+
+static void rds_ib_tasklet_fn_send(unsigned long data)
+{
+	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
+	struct rds_connection *conn = ic->conn;
+	struct rds_ib_ack_state state;
+
+	rds_ib_stats_inc(s_ib_tasklet_call);
+
+	memset(&state, 0, sizeof(state));
+	poll_cq(ic, ic->i_send_cq, ic->i_send_wc, &state);
+	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
+	poll_cq(ic, ic->i_send_cq, ic->i_send_wc, &state);
+
+	if (rds_conn_up(conn) &&
+	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
+	    test_bit(0, &conn->c_map_queued)))
+		rds_send_xmit(ic->conn);
+}
+
+static void rds_ib_tasklet_fn_recv(unsigned long data)
+{
+	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
+	struct rds_connection *conn = ic->conn;
+	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
+	struct rds_ib_ack_state state;
+
+	if (!rds_ibdev)
+		rds_conn_drop(conn);
+
+	rds_ib_stats_inc(s_ib_tasklet_call);
+
+	memset(&state, 0, sizeof(state));
+	poll_cq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
+	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
+	poll_cq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
+
+	if (state.ack_next_valid)
+		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
+	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
+		rds_send_drop_acked(conn, state.ack_recv, NULL);
+		ic->i_ack_recv = state.ack_recv;
+	}
+
+	if (rds_conn_up(conn))
+		rds_ib_attempt_ack(ic);
+}
+
 static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
 {
 	struct rds_connection *conn = data;
@@ -238,6 +328,18 @@
 	}
 }
 
+static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
+{
+	struct rds_connection *conn = context;
+	struct rds_ib_connection *ic = conn->c_transport_data;
+
+	rdsdebug("conn %p cq %p\n", conn, cq);
+
+	rds_ib_stats_inc(s_ib_evt_handler_call);
+
+	tasklet_schedule(&ic->i_send_tasklet);
+}
+
 /*
  * This needs to be very careful to not leave IS_ERR pointers around for
  * cleanup to trip over.
@@ -271,7 +373,8 @@
 	ic->i_pd = rds_ibdev->pd;
 
 	cq_attr.cqe = ic->i_send_ring.w_nr + 1;
-	ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler,
+
+	ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
 				     rds_ib_cq_event_handler, conn,
 				     &cq_attr);
 	if (IS_ERR(ic->i_send_cq)) {
@@ -282,7 +385,7 @@
 	}
 
 	cq_attr.cqe = ic->i_recv_ring.w_nr;
-	ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler,
+	ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
 				     rds_ib_cq_event_handler, conn,
 				     &cq_attr);
 	if (IS_ERR(ic->i_recv_cq)) {
@@ -637,6 +740,7 @@
 		wait_event(rds_ib_ring_empty_wait,
 			   rds_ib_ring_empty(&ic->i_recv_ring) &&
 			   (atomic_read(&ic->i_signaled_sends) == 0));
+		tasklet_kill(&ic->i_send_tasklet);
 		tasklet_kill(&ic->i_recv_tasklet);
 
 		/* first destroy the ib state that generates callbacks */
@@ -743,8 +847,10 @@
 	}
 
 	INIT_LIST_HEAD(&ic->ib_node);
-	tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
-		     (unsigned long) ic);
+	tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send,
+		     (unsigned long)ic);
+	tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv,
+		     (unsigned long)ic);
 	mutex_init(&ic->i_recv_mutex);
 #ifndef KERNEL_HAS_ATOMIC64
 	spin_lock_init(&ic->i_ack_lock);
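
The poll_cq() / ib_req_notify_cq() / poll_cq() sequence in both tasklets is the usual
way to close the race where a completion lands after the CQ has been drained but
before it has been re-armed; a minimal sketch of the pattern (names follow the
functions added above):

	/* 1) drain everything currently queued on the CQ,
	 * 2) re-arm it so a later completion raises an event,
	 * 3) drain again: anything that arrived between 1) and 2) would
	 *    otherwise sit in the CQ with no notification pending.
	 */
	poll_cq(ic, cq, wcs, &ack_state);
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);	/* IB_CQ_SOLICITED on the recv side */
	poll_cq(ic, cq, wcs, &ack_state);

Polling up to RDS_IB_WC_MAX (32) work completions per ib_poll_cq() call, instead of
the old one-at-a-time loops, removes most of the per-completion overhead, and giving
send and receive completions their own tasklets lets the two halves run independently.
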
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 251d1ce..a234074 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -65,6 +65,7 @@
  * Our own little FMR pool
  */
 struct rds_ib_mr_pool {
+	unsigned int            pool_type;
 	struct mutex		flush_lock;		/* serialize fmr invalidate */
 	struct delayed_work	flush_worker;		/* flush worker */
 
@@ -83,7 +84,7 @@
 	struct ib_fmr_attr	fmr_attr;
 };
 
-struct workqueue_struct *rds_ib_fmr_wq;
+static struct workqueue_struct *rds_ib_fmr_wq;
 
 int rds_ib_fmr_init(void)
 {
@@ -159,10 +160,8 @@
 	}
 	spin_unlock_irq(&rds_ibdev->spinlock);
 
-	if (to_free) {
-		synchronize_rcu();
-		kfree(to_free);
-	}
+	if (to_free)
+		kfree_rcu(to_free, rcu);
 }
 
 int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
@@ -236,7 +235,8 @@
 		rds_conn_destroy(ic->conn);
 }
 
-struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
+struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
+					     int pool_type)
 {
 	struct rds_ib_mr_pool *pool;
 
@@ -244,6 +244,7 @@
 	if (!pool)
 		return ERR_PTR(-ENOMEM);
 
+	pool->pool_type = pool_type;
 	init_llist_head(&pool->free_list);
 	init_llist_head(&pool->drop_list);
 	init_llist_head(&pool->clean_list);
@@ -251,28 +252,30 @@
 	init_waitqueue_head(&pool->flush_wait);
 	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
 
-	pool->fmr_attr.max_pages = fmr_message_size;
+	if (pool_type == RDS_IB_MR_1M_POOL) {
+		/* +1 allows for unaligned MRs */
+		pool->fmr_attr.max_pages = RDS_FMR_1M_MSG_SIZE + 1;
+		pool->max_items = RDS_FMR_1M_POOL_SIZE;
+	} else {
+		/* pool_type == RDS_IB_MR_8K_POOL */
+		pool->fmr_attr.max_pages = RDS_FMR_8K_MSG_SIZE + 1;
+		pool->max_items = RDS_FMR_8K_POOL_SIZE;
+	}
+
+	pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
 	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
 	pool->fmr_attr.page_shift = PAGE_SHIFT;
-	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;
-
-	/* We never allow more than max_items MRs to be allocated.
-	 * When we exceed more than max_items_soft, we start freeing
-	 * items more aggressively.
-	 * Make sure that max_items > max_items_soft > max_items / 2
-	 */
 	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
-	pool->max_items = rds_ibdev->max_fmrs;
 
 	return pool;
 }
 
 void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
 {
-	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
+	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;
 
-	iinfo->rdma_mr_max = pool->max_items;
-	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
+	iinfo->rdma_mr_max = pool_1m->max_items;
+	iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
 }
 
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
@@ -314,14 +317,28 @@
 	}
 }
 
-static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
+static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev,
+					  int npages)
 {
-	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
+	struct rds_ib_mr_pool *pool;
 	struct rds_ib_mr *ibmr = NULL;
 	int err = 0, iter = 0;
 
+	if (npages <= RDS_FMR_8K_MSG_SIZE)
+		pool = rds_ibdev->mr_8k_pool;
+	else
+		pool = rds_ibdev->mr_1m_pool;
+
 	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		schedule_delayed_work(&pool->flush_worker, 10);
+		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+
+	/* Switch pools if one of the pools is reaching its upper limit */
+	if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
+		if (pool->pool_type == RDS_IB_MR_8K_POOL)
+			pool = rds_ibdev->mr_1m_pool;
+		else
+			pool = rds_ibdev->mr_8k_pool;
+	}
 
 	while (1) {
 		ibmr = rds_ib_reuse_fmr(pool);
@@ -343,12 +360,18 @@
 		atomic_dec(&pool->item_count);
 
 		if (++iter > 2) {
-			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
+			if (pool->pool_type == RDS_IB_MR_8K_POOL)
+				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
+			else
+				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
 			return ERR_PTR(-EAGAIN);
 		}
 
 		/* We do have some empty MRs. Flush them out. */
-		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
+		if (pool->pool_type == RDS_IB_MR_8K_POOL)
+			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
+		else
+			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);
 		rds_ib_flush_mr_pool(pool, 0, &ibmr);
 		if (ibmr)
 			return ibmr;
@@ -373,7 +396,12 @@
 		goto out_no_cigar;
 	}
 
-	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
+	ibmr->pool = pool;
+	if (pool->pool_type == RDS_IB_MR_8K_POOL)
+		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
+	else
+		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);
+
 	return ibmr;
 
 out_no_cigar:
@@ -429,7 +457,7 @@
 	}
 
 	page_cnt += len >> PAGE_SHIFT;
-	if (page_cnt > fmr_message_size)
+	if (page_cnt > ibmr->pool->fmr_attr.max_pages)
 		return -EINVAL;
 
 	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
@@ -461,7 +489,10 @@
 	ibmr->sg_dma_len = sg_dma_len;
 	ibmr->remap_count++;
 
-	rds_ib_stats_inc(s_ib_rdma_mr_used);
+	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
+		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
+	else
+		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
 	ret = 0;
 
 out:
@@ -524,8 +555,7 @@
 
 	__rds_ib_teardown_mr(ibmr);
 	if (pinned) {
-		struct rds_ib_device *rds_ibdev = ibmr->device;
-		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
+		struct rds_ib_mr_pool *pool = ibmr->pool;
 
 		atomic_sub(pinned, &pool->free_pinned);
 	}
@@ -594,7 +624,7 @@
  * to free as many MRs as needed to get back to this limit.
  */
 static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
-			        int free_all, struct rds_ib_mr **ibmr_ret)
+				int free_all, struct rds_ib_mr **ibmr_ret)
 {
 	struct rds_ib_mr *ibmr, *next;
 	struct llist_node *clean_nodes;
@@ -605,11 +635,14 @@
 	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;
 	int ret = 0;
 
-	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);
+	if (pool->pool_type == RDS_IB_MR_8K_POOL)
+		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
+	else
+		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);
 
 	if (ibmr_ret) {
 		DEFINE_WAIT(wait);
-		while(!mutex_trylock(&pool->flush_lock)) {
+		while (!mutex_trylock(&pool->flush_lock)) {
 			ibmr = rds_ib_reuse_fmr(pool);
 			if (ibmr) {
 				*ibmr_ret = ibmr;
@@ -666,8 +699,12 @@
 	list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
 		unpinned += ibmr->sg_len;
 		__rds_ib_teardown_mr(ibmr);
-		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
-			rds_ib_stats_inc(s_ib_rdma_mr_free);
+		if (nfreed < free_goal ||
+		    ibmr->remap_count >= pool->fmr_attr.max_maps) {
+			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
+				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
+			else
+				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
 			list_del(&ibmr->unmap_list);
 			ib_dealloc_fmr(ibmr->fmr);
 			kfree(ibmr);
@@ -719,8 +756,8 @@
 void rds_ib_free_mr(void *trans_private, int invalidate)
 {
 	struct rds_ib_mr *ibmr = trans_private;
+	struct rds_ib_mr_pool *pool = ibmr->pool;
 	struct rds_ib_device *rds_ibdev = ibmr->device;
-	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
 
 	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
 
@@ -759,10 +796,11 @@
 
 	down_read(&rds_ib_devices_lock);
 	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
-		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
+		if (rds_ibdev->mr_8k_pool)
+			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);
 
-		if (pool)
-			rds_ib_flush_mr_pool(pool, 0, NULL);
+		if (rds_ibdev->mr_1m_pool)
+			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
 	}
 	up_read(&rds_ib_devices_lock);
 }
@@ -780,12 +818,12 @@
 		goto out;
 	}
 
-	if (!rds_ibdev->mr_pool) {
+	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
 		ret = -ENODEV;
 		goto out;
 	}
 
-	ibmr = rds_ib_alloc_fmr(rds_ibdev);
+	ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
 	if (IS_ERR(ibmr)) {
 		rds_ib_dev_put(rds_ibdev);
 		return ibmr;
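
Worked out with the default pool sizes (and integer division), the two thresholds in
rds_ib_alloc_fmr() above land at:

	8K pool (max_items = 348160): flush worker queued once dirty_count >= 34816,
	                              allocations spill to the 1M pool at >= 313344
	1M pool (max_items = 4096):   flush worker queued once dirty_count >= 409,
	                              allocations spill to the 8K pool at >= 3686

Spilling an 8K request into the 1M pool is always safe since those MRs cover more
pages; the reverse direction appears to help only requests small enough to fit the 8K
mapping limit, because rds_ib_map_fmr() still checks page_cnt against the owning
pool's fmr_attr.max_pages.
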
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index f43831e..96744b7 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -596,8 +596,7 @@
  * wr_id and avoids working with the ring in that case.
  */
 #ifndef KERNEL_HAS_ATOMIC64
-static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
-				int ack_required)
+void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
 {
 	unsigned long flags;
 
@@ -622,8 +621,7 @@
 	return seq;
 }
 #else
-static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
-				int ack_required)
+void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
 {
 	atomic64_set(&ic->i_ack_next, seq);
 	if (ack_required) {
@@ -830,20 +828,6 @@
 	rds_cong_map_updated(map, uncongested);
 }
 
-/*
- * Rings are posted with all the allocations they'll need to queue the
- * incoming message to the receiving socket so this can't fail.
- * All fragments start with a header, so we can make sure we're not receiving
- * garbage, and we can tell a small 8 byte fragment from an ACK frame.
- */
-struct rds_ib_ack_state {
-	u64		ack_next;
-	u64		ack_recv;
-	unsigned int	ack_required:1;
-	unsigned int	ack_next_valid:1;
-	unsigned int	ack_recv_valid:1;
-};
-
 static void rds_ib_process_recv(struct rds_connection *conn,
 				struct rds_ib_recv_work *recv, u32 data_len,
 				struct rds_ib_ack_state *state)
@@ -969,96 +953,50 @@
 	}
 }
 
-/*
- * Plucking the oldest entry from the ring can be done concurrently with
- * the thread refilling the ring.  Each ring operation is protected by
- * spinlocks and the transient state of refilling doesn't change the
- * recording of which entry is oldest.
- *
- * This relies on IB only calling one cq comp_handler for each cq so that
- * there will only be one caller of rds_recv_incoming() per RDS connection.
- */
-void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
-{
-	struct rds_connection *conn = context;
-	struct rds_ib_connection *ic = conn->c_transport_data;
-
-	rdsdebug("conn %p cq %p\n", conn, cq);
-
-	rds_ib_stats_inc(s_ib_rx_cq_call);
-
-	tasklet_schedule(&ic->i_recv_tasklet);
-}
-
-static inline void rds_poll_cq(struct rds_ib_connection *ic,
-			       struct rds_ib_ack_state *state)
+void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
+			     struct ib_wc *wc,
+			     struct rds_ib_ack_state *state)
 {
 	struct rds_connection *conn = ic->conn;
-	struct ib_wc wc;
 	struct rds_ib_recv_work *recv;
 
-	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
-		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
-			 (unsigned long long)wc.wr_id, wc.status,
-			 ib_wc_status_msg(wc.status), wc.byte_len,
-			 be32_to_cpu(wc.ex.imm_data));
-		rds_ib_stats_inc(s_ib_rx_cq_event);
+	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
+		 (unsigned long long)wc->wr_id, wc->status,
+		 ib_wc_status_msg(wc->status), wc->byte_len,
+		 be32_to_cpu(wc->ex.imm_data));
 
-		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
+	rds_ib_stats_inc(s_ib_rx_cq_event);
+	recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
+	ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
+			DMA_FROM_DEVICE);
 
-		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
-
-		/*
-		 * Also process recvs in connecting state because it is possible
-		 * to get a recv completion _before_ the rdmacm ESTABLISHED
-		 * event is processed.
-		 */
-		if (wc.status == IB_WC_SUCCESS) {
-			rds_ib_process_recv(conn, recv, wc.byte_len, state);
-		} else {
-			/* We expect errors as the qp is drained during shutdown */
-			if (rds_conn_up(conn) || rds_conn_connecting(conn))
-				rds_ib_conn_error(conn, "recv completion on %pI4 had "
-						  "status %u (%s), disconnecting and "
-						  "reconnecting\n", &conn->c_faddr,
-						  wc.status,
-						  ib_wc_status_msg(wc.status));
-		}
-
-		/*
-		 * rds_ib_process_recv() doesn't always consume the frag, and
-		 * we might not have called it at all if the wc didn't indicate
-		 * success. We already unmapped the frag's pages, though, and
-		 * the following rds_ib_ring_free() call tells the refill path
-		 * that it will not find an allocated frag here. Make sure we
-		 * keep that promise by freeing a frag that's still on the ring.
-		 */
-		if (recv->r_frag) {
-			rds_ib_frag_free(ic, recv->r_frag);
-			recv->r_frag = NULL;
-		}
-		rds_ib_ring_free(&ic->i_recv_ring, 1);
+	/* Also process recvs in connecting state because it is possible
+	 * to get a recv completion _before_ the rdmacm ESTABLISHED
+	 * event is processed.
+	 */
+	if (wc->status == IB_WC_SUCCESS) {
+		rds_ib_process_recv(conn, recv, wc->byte_len, state);
+	} else {
+		/* We expect errors as the qp is drained during shutdown */
+		if (rds_conn_up(conn) || rds_conn_connecting(conn))
+			rds_ib_conn_error(conn, "recv completion on %pI4 had status %u (%s), disconnecting and reconnecting\n",
+					  &conn->c_faddr,
+					  wc->status,
+					  ib_wc_status_msg(wc->status));
 	}
-}
 
-void rds_ib_recv_tasklet_fn(unsigned long data)
-{
-	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
-	struct rds_connection *conn = ic->conn;
-	struct rds_ib_ack_state state = { 0, };
-
-	rds_poll_cq(ic, &state);
-	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
-	rds_poll_cq(ic, &state);
-
-	if (state.ack_next_valid)
-		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
-	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
-		rds_send_drop_acked(conn, state.ack_recv, NULL);
-		ic->i_ack_recv = state.ack_recv;
+	/* rds_ib_process_recv() doesn't always consume the frag, and
+	 * we might not have called it at all if the wc didn't indicate
+	 * success. We already unmapped the frag's pages, though, and
+	 * the following rds_ib_ring_free() call tells the refill path
+	 * that it will not find an allocated frag here. Make sure we
+	 * keep that promise by freeing a frag that's still on the ring.
+	 */
+	if (recv->r_frag) {
+		rds_ib_frag_free(ic, recv->r_frag);
+		recv->r_frag = NULL;
 	}
-	if (rds_conn_up(conn))
-		rds_ib_attempt_ack(ic);
+	rds_ib_ring_free(&ic->i_recv_ring, 1);
 
 	/* If we ever end up with a really empty receive ring, we're
 	 * in deep trouble, as the sender will definitely see RNR
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 4e88047..670882c 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -195,7 +195,7 @@
 
 		send->s_op = NULL;
 
-		send->s_wr.wr_id = i;
+		send->s_wr.wr_id = i | RDS_IB_SEND_OP;
 		send->s_wr.sg_list = send->s_sge;
 		send->s_wr.ex.imm_data = 0;
 
@@ -237,81 +237,73 @@
  * unallocs the next free entry in the ring it doesn't alter which is
  * the next to be freed, which is what this is concerned with.
  */
-void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
+void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
 {
-	struct rds_connection *conn = context;
-	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct rds_message *rm = NULL;
-	struct ib_wc wc;
+	struct rds_connection *conn = ic->conn;
 	struct rds_ib_send_work *send;
 	u32 completed;
 	u32 oldest;
 	u32 i = 0;
-	int ret;
 	int nr_sig = 0;
 
-	rdsdebug("cq %p conn %p\n", cq, conn);
-	rds_ib_stats_inc(s_ib_tx_cq_call);
-	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
-	if (ret)
-		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
 
-	while (ib_poll_cq(cq, 1, &wc) > 0) {
-		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
-			 (unsigned long long)wc.wr_id, wc.status,
-			 ib_wc_status_msg(wc.status), wc.byte_len,
-			 be32_to_cpu(wc.ex.imm_data));
-		rds_ib_stats_inc(s_ib_tx_cq_event);
+	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
+		 (unsigned long long)wc->wr_id, wc->status,
+		 ib_wc_status_msg(wc->status), wc->byte_len,
+		 be32_to_cpu(wc->ex.imm_data));
+	rds_ib_stats_inc(s_ib_tx_cq_event);
 
-		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
-			if (time_after(jiffies, ic->i_ack_queued + HZ/2))
-				rds_ib_stats_inc(s_ib_tx_stalled);
-			rds_ib_ack_send_complete(ic);
-			continue;
-		}
+	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
+		if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
+			rds_ib_stats_inc(s_ib_tx_stalled);
+		rds_ib_ack_send_complete(ic);
+		return;
+	}
 
-		oldest = rds_ib_ring_oldest(&ic->i_send_ring);
+	oldest = rds_ib_ring_oldest(&ic->i_send_ring);
 
-		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);
+	completed = rds_ib_ring_completed(&ic->i_send_ring,
+					  (wc->wr_id & ~RDS_IB_SEND_OP),
+					  oldest);
 
-		for (i = 0; i < completed; i++) {
-			send = &ic->i_sends[oldest];
-			if (send->s_wr.send_flags & IB_SEND_SIGNALED)
-				nr_sig++;
+	for (i = 0; i < completed; i++) {
+		send = &ic->i_sends[oldest];
+		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
+			nr_sig++;
 
-			rm = rds_ib_send_unmap_op(ic, send, wc.status);
+		rm = rds_ib_send_unmap_op(ic, send, wc->status);
 
-			if (time_after(jiffies, send->s_queued + HZ/2))
-				rds_ib_stats_inc(s_ib_tx_stalled);
+		if (time_after(jiffies, send->s_queued + HZ / 2))
+			rds_ib_stats_inc(s_ib_tx_stalled);
 
-			if (send->s_op) {
-				if (send->s_op == rm->m_final_op) {
-					/* If anyone waited for this message to get flushed out, wake
-					 * them up now */
-					rds_message_unmapped(rm);
-				}
-				rds_message_put(rm);
-				send->s_op = NULL;
+		if (send->s_op) {
+			if (send->s_op == rm->m_final_op) {
+				/* If anyone waited for this message to get
+				 * flushed out, wake them up now
+				 */
+				rds_message_unmapped(rm);
 			}
-
-			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
+			rds_message_put(rm);
+			send->s_op = NULL;
 		}
 
-		rds_ib_ring_free(&ic->i_send_ring, completed);
-		rds_ib_sub_signaled(ic, nr_sig);
-		nr_sig = 0;
+		oldest = (oldest + 1) % ic->i_send_ring.w_nr;
+	}
 
-		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
-		    test_bit(0, &conn->c_map_queued))
-			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+	rds_ib_ring_free(&ic->i_send_ring, completed);
+	rds_ib_sub_signaled(ic, nr_sig);
+	nr_sig = 0;
 
-		/* We expect errors as the qp is drained during shutdown */
-		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
-			rds_ib_conn_error(conn, "send completion on %pI4 had status "
-					  "%u (%s), disconnecting and reconnecting\n",
-					  &conn->c_faddr, wc.status,
-					  ib_wc_status_msg(wc.status));
-		}
+	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
+	    test_bit(0, &conn->c_map_queued))
+		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+
+	/* We expect errors as the qp is drained during shutdown */
+	if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
+		rds_ib_conn_error(conn, "send completion on %pI4 had status %u (%s), disconnecting and reconnecting\n",
+				  &conn->c_faddr, wc->status,
+				  ib_wc_status_msg(wc->status));
 	}
 }
 
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index 2d5965d..d77e044 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -42,14 +42,14 @@
 static const char *const rds_ib_stat_names[] = {
 	"ib_connect_raced",
 	"ib_listen_closed_stale",
-	"ib_tx_cq_call",
+	"ib_evt_handler_call",
+	"ib_tasklet_call",
 	"ib_tx_cq_event",
 	"ib_tx_ring_full",
 	"ib_tx_throttle",
 	"ib_tx_sg_mapping_failure",
 	"ib_tx_stalled",
 	"ib_tx_credit_updates",
-	"ib_rx_cq_call",
 	"ib_rx_cq_event",
 	"ib_rx_ring_empty",
 	"ib_rx_refill_from_cq",
@@ -61,12 +61,18 @@
 	"ib_ack_send_delayed",
 	"ib_ack_send_piggybacked",
 	"ib_ack_received",
-	"ib_rdma_mr_alloc",
-	"ib_rdma_mr_free",
-	"ib_rdma_mr_used",
-	"ib_rdma_mr_pool_flush",
-	"ib_rdma_mr_pool_wait",
-	"ib_rdma_mr_pool_depleted",
+	"ib_rdma_mr_8k_alloc",
+	"ib_rdma_mr_8k_free",
+	"ib_rdma_mr_8k_used",
+	"ib_rdma_mr_8k_pool_flush",
+	"ib_rdma_mr_8k_pool_wait",
+	"ib_rdma_mr_8k_pool_depleted",
+	"ib_rdma_mr_1m_alloc",
+	"ib_rdma_mr_1m_free",
+	"ib_rdma_mr_1m_used",
+	"ib_rdma_mr_1m_pool_flush",
+	"ib_rdma_mr_1m_pool_wait",
+	"ib_rdma_mr_1m_pool_depleted",
 	"ib_atomic_cswp",
 	"ib_atomic_fadd",
 };
diff --git a/net/rds/rds.h b/net/rds/rds.h
index afb4048..543c308 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -86,7 +86,9 @@
 	struct hlist_node	c_hash_node;
 	__be32			c_laddr;
 	__be32			c_faddr;
-	unsigned int		c_loopback:1;
+	unsigned int		c_loopback:1,
+				c_outgoing:1,
+				c_pad_to_32:30;
 	struct rds_connection	*c_passive;
 
 	struct rds_cong_map	*c_lcong;
@@ -603,6 +605,7 @@
 int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
 void rds_remove_bound(struct rds_sock *rs);
 struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
+void rds_bind_lock_init(void);
 
 /* cong.c */
 int rds_cong_get_maps(struct rds_connection *conn);
diff --git a/net/rds/send.c b/net/rds/send.c
index 4df61a5..ee49c25 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -38,6 +38,7 @@
 #include <linux/list.h>
 #include <linux/ratelimit.h>
 #include <linux/export.h>
+#include <linux/sizes.h>
 
 #include "rds.h"
 
@@ -51,7 +52,7 @@
  * it to 0 will restore the old behavior (where we looped until we had
  * drained the queue).
  */
-static int send_batch_count = 64;
+static int send_batch_count = SZ_1K;
 module_param(send_batch_count, int, 0444);
 MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
 
@@ -223,7 +224,7 @@
 			 * through a lot of messages, lets back off and see
 			 * if anyone else jumps in
 			 */
-			if (batch_count >= 1024)
+			if (batch_count >= send_batch_count)
 				goto over_batch;
 
 			spin_lock_irqsave(&conn->c_lock, flags);
@@ -423,12 +424,15 @@
 		     !list_empty(&conn->c_send_queue)) &&
 		    send_gen == conn->c_send_gen) {
 			rds_stats_inc(s_send_lock_queue_raced);
-			goto restart;
+			if (batch_count < send_batch_count)
+				goto restart;
+			queue_delayed_work(rds_wq, &conn->c_send_w, 1);
 		}
 	}
 out:
 	return ret;
 }
+EXPORT_SYMBOL_GPL(rds_send_xmit);
 
 static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
 {
@@ -1120,8 +1124,9 @@
 	 */
 	rds_stats_inc(s_send_queued);
 
-	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
-		rds_send_xmit(conn);
+	ret = rds_send_xmit(conn);
+	if (ret == -ENOMEM || ret == -EAGAIN)
+		queue_delayed_work(rds_wq, &conn->c_send_w, 1);
 
 	rds_message_put(rm);
 	return payload_len;
@@ -1177,8 +1182,9 @@
 	rds_stats_inc(s_send_queued);
 	rds_stats_inc(s_send_pong);
 
-	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
-		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+	ret = rds_send_xmit(conn);
+	if (ret == -ENOMEM || ret == -EAGAIN)
+		queue_delayed_work(rds_wq, &conn->c_send_w, 1);
 
 	rds_message_put(rm);
 	return 0;
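
Two things to note about the send-path changes above: the hard-coded 1024 in the
over_batch back-off is now tied to the send_batch_count module parameter, whose
default moves from 64 to SZ_1K (1024), so the default behaviour is unchanged but
becomes tunable at load time; and when the batch is exhausted, or rds_send_xmit()
returns -ENOMEM/-EAGAIN from the sendmsg/pong paths, the remaining work is handed to
rds_wq with a one-jiffy delay instead of spinning in place.  Because the parameter is
registered with mode 0444 it is read-only at runtime; assuming RDS is built as the
rds module, the current value appears under
/sys/module/rds/parameters/send_batch_count and can only be overridden when the
module is loaded.
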
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index c42b60b..9d6ddba 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -67,21 +67,13 @@
 	set_fs(oldfs);
 }
 
+/* All module specific customizations to the RDS-TCP socket should be done in
+ * rds_tcp_tune() and applied after socket creation. In general these
+ * customizations should be tunable via module_param()
+ */
 void rds_tcp_tune(struct socket *sock)
 {
-	struct sock *sk = sock->sk;
-
 	rds_tcp_nonagle(sock);
-
-	/*
-	 * We're trying to saturate gigabit with the default,
-	 * see svc_sock_setbufsize().
-	 */
-	lock_sock(sk);
-	sk->sk_sndbuf = RDS_TCP_DEFAULT_BUFSIZE;
-	sk->sk_rcvbuf = RDS_TCP_DEFAULT_BUFSIZE;
-	sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
-	release_sock(sk);
 }
 
 u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
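
Dropping the fixed sndbuf/rcvbuf clamp (RDS_TCP_DEFAULT_BUFSIZE plus
SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK) hands buffer sizing back to TCP autotuning, and
the new comment points at module_param() for anything that still needs pinning.  A
hypothetical example of what such a knob could look like; rds_tcp_sndbuf and the
whole snippet are illustrative only, not part of this patch:

	static int rds_tcp_sndbuf;		/* 0 = leave TCP autotuning alone */
	module_param(rds_tcp_sndbuf, int, 0444);
	MODULE_PARM_DESC(rds_tcp_sndbuf, "If set, fixed SO_SNDBUF for RDS-TCP sockets");

	/* applied from rds_tcp_tune() after socket creation */
	if (rds_tcp_sndbuf) {
		struct sock *sk = sock->sk;

		lock_sock(sk);
		sk->sk_sndbuf = rds_tcp_sndbuf;
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		release_sock(sk);
	}
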
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 444d78d..1d90240 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -110,28 +110,24 @@
 		goto out;
 	}
 	/* An incoming SYN request came in, and TCP just accepted it.
-	 * We always create a new conn for listen side of TCP, and do not
-	 * add it to the c_hash_list.
 	 *
 	 * If the client reboots, this conn will need to be cleaned up.
 	 * rds_tcp_state_change() will do that cleanup
 	 */
 	rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
-	WARN_ON(!rs_tcp || rs_tcp->t_sock);
+	if (rs_tcp->t_sock &&
+	    ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
+		struct sock *nsk = new_sock->sk;
 
-	/*
-	 * see the comment above rds_queue_delayed_reconnect()
-	 */
-	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
-		if (rds_conn_state(conn) == RDS_CONN_UP)
-			rds_tcp_stats_inc(s_tcp_listen_closed_stale);
-		else
-			rds_tcp_stats_inc(s_tcp_connect_raced);
-		rds_conn_drop(conn);
+		nsk->sk_user_data = NULL;
+		nsk->sk_prot->disconnect(nsk, 0);
+		tcp_done(nsk);
+		new_sock = NULL;
 		ret = 0;
 		goto out;
 	}
 
+	rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
 	rds_tcp_set_callbacks(new_sock, conn);
 	rds_connect_complete(conn);
 	new_sock = NULL;
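
The rewritten accept path is an arbitration rule for duelling connection attempts:
rs_tcp->t_sock being non-NULL means this end already has a socket for the same
connection, and the address comparison decides which of the two survives.  If the
local address is numerically smaller than the peer's, the freshly accepted socket is
dismantled on the spot (sk_user_data cleared, disconnect, tcp_done) and the locally
initiated socket is kept; otherwise the accepted socket is taken over for the
connection.  Seen from both hosts the outcome appears to be:

	lower-addressed host :  keeps its outgoing socket, rejects the peer's incoming one
	higher-addressed host:  accepts the peer's socket and uses it for the connection

which replaces the old WARN_ON() plus rds_conn_drop() handling of the same race.
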
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 53b17ca..2894e60 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -83,6 +83,7 @@
 	struct rds_tcp_connection *tc = conn->c_transport_data;
 	int done = 0;
 	int ret = 0;
+	int more;
 
 	if (hdr_off == 0) {
 		/*
@@ -116,12 +117,15 @@
 			goto out;
 	}
 
+	more = rm->data.op_nents > 1 ? (MSG_MORE | MSG_SENDPAGE_NOTLAST) : 0;
 	while (sg < rm->data.op_nents) {
+		int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more;
+
 		ret = tc->t_sock->ops->sendpage(tc->t_sock,
 						sg_page(&rm->data.op_sg[sg]),
 						rm->data.op_sg[sg].offset + off,
 						rm->data.op_sg[sg].length - off,
-						MSG_DONTWAIT|MSG_NOSIGNAL);
+						flags);
 		rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]),
 			 rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off,
 			 ret);
@@ -134,6 +138,8 @@
 			off = 0;
 			sg++;
 		}
+		if (sg == rm->data.op_nents - 1)
+			more = 0;
 	}
 
 out:
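
MSG_MORE tells TCP that more data is coming so it can hold back a small segment, and
MSG_SENDPAGE_NOTLAST is the sendpage-specific hint that further pages follow, so the
stack can defer pushing; the hunk sets both for every fragment except the last of a
multi-fragment message.  The general shape of the pattern, independent of RDS:

	int i, ret;

	for (i = 0; i < npages; i++) {
		int flags = MSG_DONTWAIT | MSG_NOSIGNAL;

		/* cork everything but the final page */
		if (i < npages - 1)
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		ret = sock->ops->sendpage(sock, pages[i], 0, PAGE_SIZE, flags);
		if (ret <= 0)
			break;
	}
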
diff --git a/net/rds/threads.c b/net/rds/threads.c
index dc2402e..454aa6d 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -162,7 +162,9 @@
 	int ret;
 
 	if (rds_conn_state(conn) == RDS_CONN_UP) {
+		clear_bit(RDS_LL_SEND_FULL, &conn->c_flags);
 		ret = rds_send_xmit(conn);
+		cond_resched();
 		rdsdebug("conn %p ret %d\n", conn, ret);
 		switch (ret) {
 		case -EAGAIN:
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 6631f4f..692b3e6 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -808,7 +808,7 @@
 
 	ASSERTCMP(atomic_read(&conn->usage), >, 0);
 
-	conn->put_time = get_seconds();
+	conn->put_time = ktime_get_seconds();
 	if (atomic_dec_and_test(&conn->usage)) {
 		_debug("zombie");
 		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
@@ -852,7 +852,7 @@
 
 	_enter("");
 
-	now = get_seconds();
+	now = ktime_get_seconds();
 	earliest = ULONG_MAX;
 
 	write_lock_bh(&rxrpc_connection_lock);
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index aef1bd2..2934a73 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -208,7 +208,7 @@
 	struct rb_root		server_conns;	/* server connections on this transport */
 	struct list_head	link;		/* link in master session list */
 	struct sk_buff_head	error_queue;	/* error packets awaiting processing */
-	time_t			put_time;	/* time at which to reap */
+	unsigned long		put_time;	/* time at which to reap */
 	spinlock_t		client_lock;	/* client connection allocation lock */
 	rwlock_t		conn_lock;	/* lock for active/dead connections */
 	atomic_t		usage;
@@ -256,7 +256,7 @@
 	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
 	unsigned long		events;
 #define RXRPC_CONN_CHALLENGE	0		/* send challenge packet */
-	time_t			put_time;	/* time at which to reap */
+	unsigned long		put_time;	/* time at which to reap */
 	rwlock_t		lock;		/* access lock */
 	spinlock_t		state_lock;	/* state-change lock */
 	atomic_t		usage;
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
index 1976dec..9946467 100644
--- a/net/rxrpc/ar-transport.c
+++ b/net/rxrpc/ar-transport.c
@@ -189,7 +189,7 @@
 
 	ASSERTCMP(atomic_read(&trans->usage), >, 0);
 
-	trans->put_time = get_seconds();
+	trans->put_time = ktime_get_seconds();
 	if (unlikely(atomic_dec_and_test(&trans->usage))) {
 		_debug("zombie");
 		/* let the reaper determine the timeout to avoid a race with
@@ -226,7 +226,7 @@
 
 	_enter("");
 
-	now = get_seconds();
+	now = ktime_get_seconds();
 	earliest = ULONG_MAX;
 
 	/* extract all the transports that have been dead too long */
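
The rxrpc hunks replace get_seconds() (wall clock, time_t) with ktime_get_seconds()
(monotonic) for the connection and transport reap stamps, widening put_time to
unsigned long to match.  Because the monotonic clock never jumps when the wall clock
is stepped, an NTP or settimeofday() adjustment can no longer make idle objects look
ancient, or freshly used, to the reaper; the expiry test stays a plain subtraction,
e.g. (sketch, with "expiry" standing in for the module's reap timeout in seconds):

	unsigned long now = ktime_get_seconds();

	if (now - conn->put_time > expiry)
		/* connection is fair game for the reaper */;
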
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 5019a47..bb41699 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -68,13 +68,13 @@
 	}
 
 	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
-			       proto, &tuple))
+			       proto, ca->net, &tuple))
 		goto out;
 
 	zone.id = ca->zone;
 	zone.dir = NF_CT_DEFAULT_ZONE_DIR;
 
-	thash = nf_conntrack_find_get(dev_net(skb->dev), &zone, &tuple);
+	thash = nf_conntrack_find_get(ca->net, &zone, &tuple);
 	if (!thash)
 		goto out;
 
@@ -119,6 +119,7 @@
 
 		ci = to_connmark(a);
 		ci->tcf_action = parm->action;
+		ci->net = net;
 		ci->zone = parm->zone;
 
 		tcf_hash_insert(a);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 99c9cc1..d058696 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -189,6 +189,7 @@
 	 * worry later - danger - this API seems to have changed
 	 * from earlier kernels
 	 */
+	par.net	     = dev_net(skb->dev);
 	par.in       = skb->dev;
 	par.out      = NULL;
 	par.hooknum  = ipt->tcfi_hook;
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 0590816..5faaa54 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -65,11 +65,8 @@
 {
 	switch (code) {
 	case TC_ACT_OK:
-	case TC_ACT_RECLASSIFY:
 	case TC_ACT_SHOT:
-	case TC_ACT_PIPE:
 	case TC_ACT_STOLEN:
-	case TC_ACT_QUEUED:
 	case TC_ACT_REDIRECT:
 	case TC_ACT_UNSPEC:
 		return code;
@@ -298,6 +295,9 @@
 	prog->bpf_name = name;
 	prog->filter = fp;
 
+	if (fp->dst_needed)
+		netif_keep_dst(qdisc_dev(tp->q));
+
 	return 0;
 }
 
@@ -308,14 +308,11 @@
 {
 	bool is_bpf, is_ebpf, have_exts = false;
 	struct tcf_exts exts;
-	u32 classid;
 	int ret;
 
 	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
 	is_ebpf = tb[TCA_BPF_FD];
-
-	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
-	    !tb[TCA_BPF_CLASSID])
+	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
 		return -EINVAL;
 
 	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
@@ -323,7 +320,6 @@
 	if (ret < 0)
 		return ret;
 
-	classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
 	if (tb[TCA_BPF_FLAGS]) {
 		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
 
@@ -335,7 +331,6 @@
 		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
 	}
 
-	prog->res.classid = classid;
 	prog->exts_integrated = have_exts;
 
 	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
@@ -345,9 +340,12 @@
 		return ret;
 	}
 
-	tcf_bind_filter(tp, &prog->res, base);
-	tcf_exts_change(tp, &prog->exts, &exts);
+	if (tb[TCA_BPF_CLASSID]) {
+		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
+		tcf_bind_filter(tp, &prog->res, base);
+	}
 
+	tcf_exts_change(tp, &prog->exts, &exts);
 	return 0;
 }
 
@@ -468,6 +466,7 @@
 {
 	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
 	struct nlattr *nest;
+	u32 bpf_flags = 0;
 	int ret;
 
 	if (prog == NULL)
@@ -479,7 +478,8 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
+	if (prog->res.classid &&
+	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
 		goto nla_put_failure;
 
 	if (cls_bpf_is_ebpf(prog))
@@ -492,6 +492,11 @@
 	if (tcf_exts_dump(skb, &prog->exts) < 0)
 		goto nla_put_failure;
 
+	if (prog->exts_integrated)
+		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
+	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
+		goto nla_put_failure;
+
 	nla_nest_end(skb, nest);
 
 	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
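
With TCA_BPF_CLASSID now optional, an eBPF classifier attached in direct-action mode
needs no classid at attach time: the program's return value is used as the TC
verdict, the dump path only emits a classid when one was actually set, and
TCA_BPF_FLAG_ACT_DIRECT is reported back so userspace can see the mode.  A minimal
direct-action program is just the following sketch; it builds with the usual
clang -target bpf workflow and attaches with an iproute2 that understands the bpf
"da" flag (e.g. tc filter add dev eth0 parent ffff: bpf da obj prog.o sec classifier,
command shown only as an illustration):

	#include <linux/bpf.h>
	#include <linux/pkt_cls.h>

	__attribute__((section("classifier"), used))
	int cls_main(struct __sk_buff *skb)
	{
		return TC_ACT_OK;	/* or TC_ACT_SHOT to drop, TC_ACT_UNSPEC, ... */
	}
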
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 715e01e..f23a3b6 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -33,7 +33,6 @@
 
 struct fw_head {
 	u32			mask;
-	bool			mask_set;
 	struct fw_filter __rcu	*ht[HTSIZE];
 	struct rcu_head		rcu;
 };
@@ -84,7 +83,7 @@
 			}
 		}
 	} else {
-		/* old method */
+		/* Old method: classify the packet using its skb mark. */
 		if (id && (TC_H_MAJ(id) == 0 ||
 			   !(TC_H_MAJ(id ^ tp->q->handle)))) {
 			res->classid = id;
@@ -114,14 +113,9 @@
 
 static int fw_init(struct tcf_proto *tp)
 {
-	struct fw_head *head;
-
-	head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
-	if (head == NULL)
-		return -ENOBUFS;
-
-	head->mask_set = false;
-	rcu_assign_pointer(tp->root, head);
+	/* We don't allocate fw_head here, because in the old method
+	 * we don't need it at all.
+	 */
 	return 0;
 }
 
@@ -252,7 +246,7 @@
 	int err;
 
 	if (!opt)
-		return handle ? -EINVAL : 0;
+		return handle ? -EINVAL : 0; /* Succeed if it is old method. */
 
 	err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy);
 	if (err < 0)
@@ -302,11 +296,17 @@
 	if (!handle)
 		return -EINVAL;
 
-	if (!head->mask_set) {
-		head->mask = 0xFFFFFFFF;
+	if (!head) {
+		u32 mask = 0xFFFFFFFF;
 		if (tb[TCA_FW_MASK])
-			head->mask = nla_get_u32(tb[TCA_FW_MASK]);
-		head->mask_set = true;
+			mask = nla_get_u32(tb[TCA_FW_MASK]);
+
+		head = kzalloc(sizeof(*head), GFP_KERNEL);
+		if (!head)
+			return -ENOBUFS;
+		head->mask = mask;
+
+		rcu_assign_pointer(tp->root, head);
 	}
 
 	f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c
index df0328b..c66ca94 100644
--- a/net/sched/em_ipset.c
+++ b/net/sched/em_ipset.c
@@ -95,6 +95,7 @@
 	if (skb->skb_iif)
 		indev = dev_get_by_index_rcu(em->net, skb->skb_iif);
 
+	acpar.net     = em->net;
 	acpar.in      = indev ? indev : dev;
 	acpar.out     = dev;
 
diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
index 094a874..3fee70d 100644
--- a/net/sched/sch_blackhole.c
+++ b/net/sched/sch_blackhole.c
@@ -11,7 +11,7 @@
  * Note: Quantum tunneling is not supported.
  */
 
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
@@ -37,17 +37,8 @@
 	.owner		= THIS_MODULE,
 };
 
-static int __init blackhole_module_init(void)
+static int __init blackhole_init(void)
 {
 	return register_qdisc(&blackhole_qdisc_ops);
 }
-
-static void __exit blackhole_module_exit(void)
-{
-	unregister_qdisc(&blackhole_qdisc_ops);
-}
-
-module_init(blackhole_module_init)
-module_exit(blackhole_module_exit)
-
-MODULE_LICENSE("GPL");
+device_initcall(blackhole_init)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index f377702..109b232 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -224,13 +224,16 @@
 	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
 		return &q->internal;
 
-	/* SYNACK messages are attached to a listener socket.
-	 * 1) They are not part of a 'flow' yet
-	 * 2) We do not want to rate limit them (eg SYNFLOOD attack),
+	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
+	 * or a listener (SYNCOOKIE mode)
+	 * 1) request sockets are not full blown,
+	 *    they do not contain sk_pacing_rate
+	 * 2) They are not part of a 'flow' yet
+	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
 	 *    especially if the listener set SO_MAX_PACING_RATE
-	 * 3) We pretend they are orphaned
+	 * 4) We pretend they are orphaned
 	 */
-	if (!sk || sk->sk_state == TCP_LISTEN) {
+	if (!sk || sk_listener(sk)) {
 		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
 
 		/* By forcing low order bit to 1, we make sure to not
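
sk_listener() here covers both a real listening socket and the lightweight
TCP_NEW_SYN_RECV request sockets that SYNACKs are now attached to; as of this series
the helper in include/net/sock.h is roughly the following (quoted from memory, treat
as a sketch):

	static inline bool sk_listener(const struct sock *sk)
	{
		return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
	}

Request sockets carry no sk_pacing_rate, so fq keeps steering their SYNACKs into the
orphan buckets exactly as it already did for listener-owned ones.
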
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 197c3f5..b00f1f9 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1208,20 +1208,22 @@
  *   within this document.
  *
  * Our basic strategy is to round-robin transports in priorities
- * according to sctp_state_prio_map[] e.g., if no such
+ * according to sctp_trans_score() e.g., if no such
  * transport with state SCTP_ACTIVE exists, round-robin through
  * SCTP_UNKNOWN, etc. You get the picture.
  */
-static const u8 sctp_trans_state_to_prio_map[] = {
-	[SCTP_ACTIVE]	= 3,	/* best case */
-	[SCTP_UNKNOWN]	= 2,
-	[SCTP_PF]	= 1,
-	[SCTP_INACTIVE] = 0,	/* worst case */
-};
-
 static u8 sctp_trans_score(const struct sctp_transport *trans)
 {
-	return sctp_trans_state_to_prio_map[trans->state];
+	switch (trans->state) {
+	case SCTP_ACTIVE:
+		return 3;	/* best case */
+	case SCTP_UNKNOWN:
+		return 2;
+	case SCTP_PF:
+		return 1;
+	default: /* case SCTP_INACTIVE */
+		return 0;	/* worst case */
+	}
 }
 
 static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index b714333..3d9ea9a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1186,7 +1186,7 @@
 	unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
 }
 
-static int __net_init sctp_net_init(struct net *net)
+static int __net_init sctp_defaults_init(struct net *net)
 {
 	int status;
 
@@ -1279,12 +1279,6 @@
 
 	sctp_dbg_objcnt_init(net);
 
-	/* Initialize the control inode/socket for handling OOTB packets.  */
-	if ((status = sctp_ctl_sock_init(net))) {
-		pr_err("Failed to initialize the SCTP control sock\n");
-		goto err_ctl_sock_init;
-	}
-
 	/* Initialize the local address list. */
 	INIT_LIST_HEAD(&net->sctp.local_addr_list);
 	spin_lock_init(&net->sctp.local_addr_lock);
@@ -1300,9 +1294,6 @@
 
 	return 0;
 
-err_ctl_sock_init:
-	sctp_dbg_objcnt_exit(net);
-	sctp_proc_exit(net);
 err_init_proc:
 	cleanup_sctp_mibs(net);
 err_init_mibs:
@@ -1311,15 +1302,12 @@
 	return status;
 }
 
-static void __net_exit sctp_net_exit(struct net *net)
+static void __net_exit sctp_defaults_exit(struct net *net)
 {
 	/* Free the local address list */
 	sctp_free_addr_wq(net);
 	sctp_free_local_addr_list(net);
 
-	/* Free the control endpoint.  */
-	inet_ctl_sock_destroy(net->sctp.ctl_sock);
-
 	sctp_dbg_objcnt_exit(net);
 
 	sctp_proc_exit(net);
@@ -1327,9 +1315,32 @@
 	sctp_sysctl_net_unregister(net);
 }
 
-static struct pernet_operations sctp_net_ops = {
-	.init = sctp_net_init,
-	.exit = sctp_net_exit,
+static struct pernet_operations sctp_defaults_ops = {
+	.init = sctp_defaults_init,
+	.exit = sctp_defaults_exit,
+};
+
+static int __net_init sctp_ctrlsock_init(struct net *net)
+{
+	int status;
+
+	/* Initialize the control inode/socket for handling OOTB packets.  */
+	status = sctp_ctl_sock_init(net);
+	if (status)
+		pr_err("Failed to initialize the SCTP control sock\n");
+
+	return status;
+}
+
+static void __net_init sctp_ctrlsock_exit(struct net *net)
+{
+	/* Free the control endpoint.  */
+	inet_ctl_sock_destroy(net->sctp.ctl_sock);
+}
+
+static struct pernet_operations sctp_ctrlsock_ops = {
+	.init = sctp_ctrlsock_init,
+	.exit = sctp_ctrlsock_exit,
 };
 
 /* Initialize the universe into something sensible.  */
@@ -1462,8 +1473,11 @@
 	sctp_v4_pf_init();
 	sctp_v6_pf_init();
 
-	status = sctp_v4_protosw_init();
+	status = register_pernet_subsys(&sctp_defaults_ops);
+	if (status)
+		goto err_register_defaults;
 
+	status = sctp_v4_protosw_init();
 	if (status)
 		goto err_protosw_init;
 
@@ -1471,9 +1485,9 @@
 	if (status)
 		goto err_v6_protosw_init;
 
-	status = register_pernet_subsys(&sctp_net_ops);
+	status = register_pernet_subsys(&sctp_ctrlsock_ops);
 	if (status)
-		goto err_register_pernet_subsys;
+		goto err_register_ctrlsock;
 
 	status = sctp_v4_add_protocol();
 	if (status)
@@ -1489,12 +1503,14 @@
 err_v6_add_protocol:
 	sctp_v4_del_protocol();
 err_add_protocol:
-	unregister_pernet_subsys(&sctp_net_ops);
-err_register_pernet_subsys:
+	unregister_pernet_subsys(&sctp_ctrlsock_ops);
+err_register_ctrlsock:
 	sctp_v6_protosw_exit();
 err_v6_protosw_init:
 	sctp_v4_protosw_exit();
 err_protosw_init:
+	unregister_pernet_subsys(&sctp_defaults_ops);
+err_register_defaults:
 	sctp_v4_pf_exit();
 	sctp_v6_pf_exit();
 	sctp_sysctl_unregister();
@@ -1527,12 +1543,14 @@
 	sctp_v6_del_protocol();
 	sctp_v4_del_protocol();
 
-	unregister_pernet_subsys(&sctp_net_ops);
+	unregister_pernet_subsys(&sctp_ctrlsock_ops);
 
 	/* Free protosw registrations */
 	sctp_v6_protosw_exit();
 	sctp_v4_protosw_exit();
 
+	unregister_pernet_subsys(&sctp_defaults_ops);
+
 	/* Unregister with socket layer. */
 	sctp_v6_pf_exit();
 	sctp_v4_pf_exit();
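
Splitting the old sctp_net_ops into sctp_defaults_ops and sctp_ctrlsock_ops leans on
the fact that pernet operations run in registration order when a namespace is created
and in reverse order when it is torn down.  Registering the defaults before the
protosw setup and the control-socket ops after it therefore gives, for every
namespace:

	register order:  sctp_defaults_ops -> protosw/v4/v6 -> sctp_ctrlsock_ops
	netns init:      sctp_defaults_init() ... sctp_ctrlsock_init()
	netns exit:      sctp_ctrlsock_exit() ... sctp_defaults_exit()

so the per-net address lists, proc entries and sysctls exist before the OOTB control
socket is created, and the socket is gone again before they are torn down.
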
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 7954c52..763e06a 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2494,7 +2494,7 @@
 	__u16 sat;
 	int retval = 1;
 	sctp_scope_t scope;
-	time_t stale;
+	u32 stale;
 	struct sctp_af *af;
 	union sctp_addr_param *addr_param;
 	struct sctp_transport *t;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 35df126..6098d4c 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -244,12 +244,13 @@
 	int error;
 	struct sctp_transport *transport = (struct sctp_transport *) peer;
 	struct sctp_association *asoc = transport->asoc;
-	struct net *net = sock_net(asoc->base.sk);
+	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
 
 	/* Check whether a task is in the sock.  */
 
-	bh_lock_sock(asoc->base.sk);
-	if (sock_owned_by_user(asoc->base.sk)) {
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
 		/* Try again later.  */
@@ -272,10 +273,10 @@
 			   transport, GFP_ATOMIC);
 
 	if (error)
-		asoc->base.sk->sk_err = -error;
+		sk->sk_err = -error;
 
 out_unlock:
-	bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(sk);
 	sctp_transport_put(transport);
 }
 
@@ -285,11 +286,12 @@
 static void sctp_generate_timeout_event(struct sctp_association *asoc,
 					sctp_event_timeout_t timeout_type)
 {
-	struct net *net = sock_net(asoc->base.sk);
+	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
 	int error = 0;
 
-	bh_lock_sock(asoc->base.sk);
-	if (sock_owned_by_user(asoc->base.sk)) {
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
 		pr_debug("%s: sock is busy: timer %d\n", __func__,
 			 timeout_type);
 
@@ -312,10 +314,10 @@
 			   (void *)timeout_type, GFP_ATOMIC);
 
 	if (error)
-		asoc->base.sk->sk_err = -error;
+		sk->sk_err = -error;
 
 out_unlock:
-	bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(sk);
 	sctp_association_put(asoc);
 }
 
@@ -365,10 +367,11 @@
 	int error = 0;
 	struct sctp_transport *transport = (struct sctp_transport *) data;
 	struct sctp_association *asoc = transport->asoc;
-	struct net *net = sock_net(asoc->base.sk);
+	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
 
-	bh_lock_sock(asoc->base.sk);
-	if (sock_owned_by_user(asoc->base.sk)) {
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
 		/* Try again later.  */
@@ -388,11 +391,11 @@
 			   asoc->state, asoc->ep, asoc,
 			   transport, GFP_ATOMIC);
 
-	 if (error)
-		 asoc->base.sk->sk_err = -error;
+	if (error)
+		sk->sk_err = -error;
 
 out_unlock:
-	bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(sk);
 	sctp_transport_put(transport);
 }
 
@@ -403,10 +406,11 @@
 {
 	struct sctp_transport *transport = (struct sctp_transport *) data;
 	struct sctp_association *asoc = transport->asoc;
-	struct net *net = sock_net(asoc->base.sk);
+	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
 
-	bh_lock_sock(asoc->base.sk);
-	if (sock_owned_by_user(asoc->base.sk)) {
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
 		/* Try again later.  */
@@ -427,7 +431,7 @@
 		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 
 out_unlock:
-	bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(sk);
 	sctp_association_put(asoc);
 }
 
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index d7eaa73..6f46aa1 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2306,7 +2306,7 @@
 						 sctp_cmd_seq_t *commands)
 {
 	struct sctp_chunk *chunk = arg;
-	time_t stale;
+	u32 stale;
 	sctp_cookie_preserve_param_t bht;
 	sctp_errhdr_t *err;
 	struct sctp_chunk *reply;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b140c09..f14f24e 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -297,7 +297,7 @@
 	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 	ret = atomic_dec_and_test(&task->tk_count);
 	if (waitqueue_active(wq))
-		__wake_up_locked_key(wq, TASK_NORMAL, 1, &k);
+		__wake_up_locked_key(wq, TASK_NORMAL, &k);
 	spin_unlock_irqrestore(&wq->lock, flags);
 	return ret;
 }
@@ -1092,14 +1092,10 @@
 rpc_destroy_mempool(void)
 {
 	rpciod_stop();
-	if (rpc_buffer_mempool)
-		mempool_destroy(rpc_buffer_mempool);
-	if (rpc_task_mempool)
-		mempool_destroy(rpc_task_mempool);
-	if (rpc_task_slabp)
-		kmem_cache_destroy(rpc_task_slabp);
-	if (rpc_buffer_slabp)
-		kmem_cache_destroy(rpc_buffer_slabp);
+	mempool_destroy(rpc_buffer_mempool);
+	mempool_destroy(rpc_task_mempool);
+	kmem_cache_destroy(rpc_task_slabp);
+	kmem_cache_destroy(rpc_buffer_slabp);
 	rpc_destroy_wait_queue(&delay_queue);
 }
 
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ab5dd62..2e98f4a 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -614,6 +614,7 @@
 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
 	xprt->ops->close(xprt);
 	xprt_release_write(xprt, NULL);
+	wake_up_bit(&xprt->state, XPRT_LOCKED);
 }
 
 /**
@@ -723,6 +724,7 @@
 	xprt->ops->release_xprt(xprt, NULL);
 out:
 	spin_unlock_bh(&xprt->transport_lock);
+	wake_up_bit(&xprt->state, XPRT_LOCKED);
 }
 
 /**
@@ -1394,6 +1396,10 @@
 static void xprt_destroy(struct rpc_xprt *xprt)
 {
 	dprintk("RPC:       destroying transport %p\n", xprt);
+
+	/* Exclude transport connect/disconnect handlers */
+	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
+
 	del_timer_sync(&xprt->timer);
 
 	rpc_xprt_debugfs_unregister(xprt);
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index cb25c89..f1e8daf 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -39,25 +39,6 @@
 fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
 	    struct rpcrdma_create_data_internal *cdata)
 {
-	struct ib_device_attr *devattr = &ia->ri_devattr;
-	struct ib_mr *mr;
-
-	/* Obtain an lkey to use for the regbufs, which are
-	 * protected from remote access.
-	 */
-	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
-		ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
-	} else {
-		mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE);
-		if (IS_ERR(mr)) {
-			pr_err("%s: ib_get_dma_mr for failed with %lX\n",
-			       __func__, PTR_ERR(mr));
-			return -ENOMEM;
-		}
-		ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
-		ia->ri_dma_mr = mr;
-	}
-
 	return 0;
 }
 
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index d6653f5..5318951 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -189,11 +189,6 @@
 	struct ib_device_attr *devattr = &ia->ri_devattr;
 	int depth, delta;
 
-	/* Obtain an lkey to use for the regbufs, which are
-	 * protected from remote access.
-	 */
-	ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
-
 	ia->ri_max_frmr_depth =
 			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
 			      devattr->max_fast_reg_page_list_len);
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
index 72cf8b1..617b76f 100644
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -23,7 +23,6 @@
 physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
 		 struct rpcrdma_create_data_internal *cdata)
 {
-	struct ib_device_attr *devattr = &ia->ri_devattr;
 	struct ib_mr *mr;
 
 	/* Obtain an rkey to use for RPC data payloads.
@@ -37,15 +36,8 @@
 		       __func__, PTR_ERR(mr));
 		return -ENOMEM;
 	}
+
 	ia->ri_dma_mr = mr;
-
-	/* Obtain an lkey to use for regbufs.
-	 */
-	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
-		ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
-	else
-		ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
-
 	return 0;
 }
 
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 6829967..eb081ad 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1252,7 +1252,7 @@
 		goto out_free;
 
 	iov->length = size;
-	iov->lkey = ia->ri_dma_lkey;
+	iov->lkey = ia->ri_pd->local_dma_lkey;
 	rb->rg_size = size;
 	rb->rg_owner = NULL;
 	return rb;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 0251222..c09414e 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -65,7 +65,6 @@
 	struct rdma_cm_id 	*ri_id;
 	struct ib_pd		*ri_pd;
 	struct ib_mr		*ri_dma_mr;
-	u32			ri_dma_lkey;
 	struct completion	ri_done;
 	int			ri_async_rc;
 	unsigned int		ri_max_frmr_depth;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 7be90bc..1a85e0e 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -777,7 +777,6 @@
 	xs_sock_reset_connection_flags(xprt);
 	/* Mark transport as closed and wake up all pending tasks */
 	xprt_disconnect_done(xprt);
-	xprt_force_disconnect(xprt);
 }
 
 /**
@@ -881,8 +880,11 @@
  */
 static void xs_destroy(struct rpc_xprt *xprt)
 {
+	struct sock_xprt *transport = container_of(xprt,
+			struct sock_xprt, xprt);
 	dprintk("RPC:       xs_destroy xprt %p\n", xprt);
 
+	cancel_delayed_work_sync(&transport->connect_worker);
 	xs_close(xprt);
 	xs_xprt_free(xprt);
 	module_put(THIS_MODULE);
@@ -1435,6 +1437,7 @@
 static void xs_tcp_state_change(struct sock *sk)
 {
 	struct rpc_xprt *xprt;
+	struct sock_xprt *transport;
 
 	read_lock_bh(&sk->sk_callback_lock);
 	if (!(xprt = xprt_from_sock(sk)))
@@ -1446,13 +1449,12 @@
 			sock_flag(sk, SOCK_ZAPPED),
 			sk->sk_shutdown);
 
+	transport = container_of(xprt, struct sock_xprt, xprt);
 	trace_rpc_socket_state_change(xprt, sk->sk_socket);
 	switch (sk->sk_state) {
 	case TCP_ESTABLISHED:
 		spin_lock(&xprt->transport_lock);
 		if (!xprt_test_and_set_connected(xprt)) {
-			struct sock_xprt *transport = container_of(xprt,
-					struct sock_xprt, xprt);
 
 			/* Reset TCP record info */
 			transport->tcp_offset = 0;
@@ -1461,6 +1463,8 @@
 			transport->tcp_flags =
 				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
 			xprt->connect_cookie++;
+			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
+			xprt_clear_connecting(xprt);
 
 			xprt_wake_pending_tasks(xprt, -EAGAIN);
 		}
@@ -1496,6 +1500,9 @@
 		smp_mb__after_atomic();
 		break;
 	case TCP_CLOSE:
+		if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
+					&transport->sock_state))
+			xprt_clear_connecting(xprt);
 		xs_sock_mark_closed(xprt);
 	}
  out:
@@ -2179,6 +2186,7 @@
 	/* Tell the socket layer to start connecting... */
 	xprt->stat.connect_count++;
 	xprt->stat.connect_start = jiffies;
+	set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
 	ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
 	switch (ret) {
 	case 0:
@@ -2240,7 +2248,6 @@
 	case -EINPROGRESS:
 	case -EALREADY:
 		xprt_unlock_connect(xprt, transport);
-		xprt_clear_connecting(xprt);
 		return;
 	case -EINVAL:
 		/* Happens, for instance, if the user specified a link
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index fda38f8..7a9ab90 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -1,6 +1,6 @@
 /*
  * net/switchdev/switchdev.c - Switch device API
- * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
  * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -16,10 +16,83 @@
 #include <linux/notifier.h>
 #include <linux/netdevice.h>
 #include <linux/if_bridge.h>
+#include <linux/list.h>
 #include <net/ip_fib.h>
 #include <net/switchdev.h>
 
 /**
+ *	switchdev_trans_item_enqueue - Enqueue data item to transaction queue
+ *
+ *	@trans: transaction
+ *	@data: pointer to data being queued
+ *	@destructor: data destructor
+ *	@tritem: transaction item being queued
+ *
+ *	Enqueue data item to transaction queue. tritem is typically placed in
+ *	container pointed at by data pointer. Destructor is called on
+ *	transaction abort and after successful commit phase in case
+ *	the caller did not dequeue the item before.
+ */
+void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
+				  void *data, void (*destructor)(void const *),
+				  struct switchdev_trans_item *tritem)
+{
+	tritem->data = data;
+	tritem->destructor = destructor;
+	list_add_tail(&tritem->list, &trans->item_list);
+}
+EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);
+
+static struct switchdev_trans_item *
+__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
+{
+	struct switchdev_trans_item *tritem;
+
+	if (list_empty(&trans->item_list))
+		return NULL;
+	tritem = list_first_entry(&trans->item_list,
+				  struct switchdev_trans_item, list);
+	list_del(&tritem->list);
+	return tritem;
+}
+
+/**
+ *	switchdev_trans_item_dequeue - Dequeue data item from transaction queue
+ *
+ *	@trans: transaction
+ */
+void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
+{
+	struct switchdev_trans_item *tritem;
+
+	tritem = __switchdev_trans_item_dequeue(trans);
+	BUG_ON(!tritem);
+	return tritem->data;
+}
+EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);
+
+static void switchdev_trans_init(struct switchdev_trans *trans)
+{
+	INIT_LIST_HEAD(&trans->item_list);
+}
+
+static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
+{
+	struct switchdev_trans_item *tritem;
+
+	while ((tritem = __switchdev_trans_item_dequeue(trans)))
+		tritem->destructor(tritem->data);
+}
+
+static void switchdev_trans_items_warn_destroy(struct net_device *dev,
+					       struct switchdev_trans *trans)
+{
+	WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
+	     dev->name);
+	switchdev_trans_items_destroy(trans);
+}
+
+/**
  *	switchdev_port_attr_get - Get port attribute
  *
  *	@dev: port device
@@ -31,7 +104,7 @@
 	struct net_device *lower_dev;
 	struct list_head *iter;
 	struct switchdev_attr first = {
-		.id = SWITCHDEV_ATTR_UNDEFINED
+		.id = SWITCHDEV_ATTR_ID_UNDEFINED
 	};
 	int err = -EOPNOTSUPP;
 
@@ -51,7 +124,7 @@
 		err = switchdev_port_attr_get(lower_dev, attr);
 		if (err)
 			break;
-		if (first.id == SWITCHDEV_ATTR_UNDEFINED)
+		if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
 			first = *attr;
 		else if (memcmp(&first, attr, sizeof(*attr)))
 			return -ENODATA;
@@ -62,7 +135,8 @@
 EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
 
 static int __switchdev_port_attr_set(struct net_device *dev,
-				     struct switchdev_attr *attr)
+				     struct switchdev_attr *attr,
+				     struct switchdev_trans *trans)
 {
 	const struct switchdev_ops *ops = dev->switchdev_ops;
 	struct net_device *lower_dev;
@@ -70,10 +144,10 @@
 	int err = -EOPNOTSUPP;
 
 	if (ops && ops->switchdev_port_attr_set)
-		return ops->switchdev_port_attr_set(dev, attr);
+		return ops->switchdev_port_attr_set(dev, attr, trans);
 
 	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
-		return err;
+		goto done;
 
 	/* Switch device port(s) may be stacked under
 	 * bond/team/vlan dev, so recurse down to set attr on
@@ -81,11 +155,18 @@
 	 */
 
 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = __switchdev_port_attr_set(lower_dev, attr);
+		err = __switchdev_port_attr_set(lower_dev, attr, trans);
+		if (err == -EOPNOTSUPP &&
+		    attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
+			continue;
 		if (err)
 			break;
 	}
 
+done:
+	if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
+		err = 0;
+
 	return err;
 }
 
@@ -144,6 +225,7 @@
  */
 int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
 {
+	struct switchdev_trans trans;
 	int err;
 
 	if (!rtnl_is_locked()) {
@@ -156,6 +238,8 @@
 		return switchdev_port_attr_set_defer(dev, attr);
 	}
 
+	switchdev_trans_init(&trans);
+
 	/* Phase I: prepare for attr set. Driver/device should fail
 	 * here if there are going to be issues in the commit phase,
 	 * such as lack of resources or support.  The driver/device
@@ -163,18 +247,16 @@
 	 * but should not commit the attr.
 	 */
 
-	attr->trans = SWITCHDEV_TRANS_PREPARE;
-	err = __switchdev_port_attr_set(dev, attr);
+	trans.ph_prepare = true;
+	err = __switchdev_port_attr_set(dev, attr, &trans);
 	if (err) {
 		/* Prepare phase failed: abort the transaction.  Any
 		 * resources reserved in the prepare phase are
 		 * released.
 		 */
 
-		if (err != -EOPNOTSUPP) {
-			attr->trans = SWITCHDEV_TRANS_ABORT;
-			__switchdev_port_attr_set(dev, attr);
-		}
+		if (err != -EOPNOTSUPP)
+			switchdev_trans_items_destroy(&trans);
 
 		return err;
 	}
@@ -184,17 +266,19 @@
 	 * because the driver said everythings was OK in phase I.
 	 */
 
-	attr->trans = SWITCHDEV_TRANS_COMMIT;
-	err = __switchdev_port_attr_set(dev, attr);
+	trans.ph_prepare = false;
+	err = __switchdev_port_attr_set(dev, attr, &trans);
 	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
 	     dev->name, attr->id);
+	switchdev_trans_items_warn_destroy(dev, &trans);
 
 	return err;
 }
 EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
 
 static int __switchdev_port_obj_add(struct net_device *dev,
-				    struct switchdev_obj *obj)
+				    const struct switchdev_obj *obj,
+				    struct switchdev_trans *trans)
 {
 	const struct switchdev_ops *ops = dev->switchdev_ops;
 	struct net_device *lower_dev;
@@ -202,7 +286,7 @@
 	int err = -EOPNOTSUPP;
 
 	if (ops && ops->switchdev_port_obj_add)
-		return ops->switchdev_port_obj_add(dev, obj);
+		return ops->switchdev_port_obj_add(dev, obj, trans);
 
 	/* Switch device port(s) may be stacked under
 	 * bond/team/vlan dev, so recurse down to add object on
@@ -210,7 +294,7 @@
 	 */
 
 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = __switchdev_port_obj_add(lower_dev, obj);
+		err = __switchdev_port_obj_add(lower_dev, obj, trans);
 		if (err)
 			break;
 	}
@@ -222,6 +306,7 @@
  *	switchdev_port_obj_add - Add port object
  *
  *	@dev: port device
+ *	@id: object ID
  *	@obj: object to add
  *
  *	Use a 2-phase prepare-commit transaction model to ensure
@@ -230,12 +315,16 @@
  *
  *	rtnl_lock must be held.
  */
-int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
+int switchdev_port_obj_add(struct net_device *dev,
+			   const struct switchdev_obj *obj)
 {
+	struct switchdev_trans trans;
 	int err;
 
 	ASSERT_RTNL();
 
+	switchdev_trans_init(&trans);
+
 	/* Phase I: prepare for obj add. Driver/device should fail
 	 * here if there are going to be issues in the commit phase,
 	 * such as lack of resources or support.  The driver/device
@@ -243,18 +332,16 @@
 	 * but should not commit the obj.
 	 */
 
-	obj->trans = SWITCHDEV_TRANS_PREPARE;
-	err = __switchdev_port_obj_add(dev, obj);
+	trans.ph_prepare = true;
+	err = __switchdev_port_obj_add(dev, obj, &trans);
 	if (err) {
 		/* Prepare phase failed: abort the transaction.  Any
 		 * resources reserved in the prepare phase are
 		 * released.
 		 */
 
-		if (err != -EOPNOTSUPP) {
-			obj->trans = SWITCHDEV_TRANS_ABORT;
-			__switchdev_port_obj_add(dev, obj);
-		}
+		if (err != -EOPNOTSUPP)
+			switchdev_trans_items_destroy(&trans);
 
 		return err;
 	}
@@ -264,9 +351,10 @@
 	 * because the driver said everythings was OK in phase I.
 	 */
 
-	obj->trans = SWITCHDEV_TRANS_COMMIT;
-	err = __switchdev_port_obj_add(dev, obj);
+	trans.ph_prepare = false;
+	err = __switchdev_port_obj_add(dev, obj, &trans);
 	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
+	switchdev_trans_items_warn_destroy(dev, &trans);
 
 	return err;
 }
@@ -276,9 +364,11 @@
  *	switchdev_port_obj_del - Delete port object
  *
  *	@dev: port device
+ *	@id: object ID
  *	@obj: object to delete
  */
-int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
+int switchdev_port_obj_del(struct net_device *dev,
+			   const struct switchdev_obj *obj)
 {
 	const struct switchdev_ops *ops = dev->switchdev_ops;
 	struct net_device *lower_dev;
@@ -307,9 +397,12 @@
  *	switchdev_port_obj_dump - Dump port objects
  *
  *	@dev: port device
+ *	@id: object ID
  *	@obj: object to dump
+ *	@cb: function to call with a filled object
  */
-int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj)
+int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
+			    switchdev_obj_dump_cb_t *cb)
 {
 	const struct switchdev_ops *ops = dev->switchdev_ops;
 	struct net_device *lower_dev;
@@ -317,7 +410,7 @@
 	int err = -EOPNOTSUPP;
 
 	if (ops && ops->switchdev_port_obj_dump)
-		return ops->switchdev_port_obj_dump(dev, obj);
+		return ops->switchdev_port_obj_dump(dev, obj, cb);
 
 	/* Switch device port(s) may be stacked under
 	 * bond/team/vlan dev, so recurse down to dump objects on
@@ -325,7 +418,7 @@
 	 */
 
 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = switchdev_port_obj_dump(lower_dev, obj);
+		err = switchdev_port_obj_dump(lower_dev, obj, cb);
 		break;
 	}
 
@@ -397,7 +490,7 @@
 EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
 
 struct switchdev_vlan_dump {
-	struct switchdev_obj obj;
+	struct switchdev_obj_port_vlan vlan;
 	struct sk_buff *skb;
 	u32 filter_mask;
 	u16 flags;
@@ -405,8 +498,7 @@
 	u16 end;
 };
 
-static int switchdev_port_vlan_dump_put(struct net_device *dev,
-					struct switchdev_vlan_dump *dump)
+static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump)
 {
 	struct bridge_vlan_info vinfo;
 
@@ -436,12 +528,11 @@
 	return 0;
 }
 
-static int switchdev_port_vlan_dump_cb(struct net_device *dev,
-				       struct switchdev_obj *obj)
+static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj)
 {
+	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
 	struct switchdev_vlan_dump *dump =
-		container_of(obj, struct switchdev_vlan_dump, obj);
-	struct switchdev_obj_vlan *vlan = &dump->obj.u.vlan;
+		container_of(vlan, struct switchdev_vlan_dump, vlan);
 	int err = 0;
 
 	if (vlan->vid_begin > vlan->vid_end)
@@ -452,7 +543,7 @@
 		for (dump->begin = dump->end = vlan->vid_begin;
 		     dump->begin <= vlan->vid_end;
 		     dump->begin++, dump->end++) {
-			err = switchdev_port_vlan_dump_put(dev, dump);
+			err = switchdev_port_vlan_dump_put(dump);
 			if (err)
 				return err;
 		}
@@ -464,7 +555,7 @@
 				/* prepend */
 				dump->begin = vlan->vid_begin;
 			} else {
-				err = switchdev_port_vlan_dump_put(dev, dump);
+				err = switchdev_port_vlan_dump_put(dump);
 				dump->flags = vlan->flags;
 				dump->begin = vlan->vid_begin;
 				dump->end = vlan->vid_end;
@@ -476,7 +567,7 @@
 				/* append */
 				dump->end = vlan->vid_end;
 			} else {
-				err = switchdev_port_vlan_dump_put(dev, dump);
+				err = switchdev_port_vlan_dump_put(dump);
 				dump->flags = vlan->flags;
 				dump->begin = vlan->vid_begin;
 				dump->end = vlan->vid_end;
@@ -493,10 +584,7 @@
 				    u32 filter_mask)
 {
 	struct switchdev_vlan_dump dump = {
-		.obj = {
-			.id = SWITCHDEV_OBJ_PORT_VLAN,
-			.cb = switchdev_port_vlan_dump_cb,
-		},
+		.vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 		.skb = skb,
 		.filter_mask = filter_mask,
 	};
@@ -504,12 +592,13 @@
 
 	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
 	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
-		err = switchdev_port_obj_dump(dev, &dump.obj);
+		err = switchdev_port_obj_dump(dev, &dump.vlan.obj,
+					      switchdev_port_vlan_dump_cb);
 		if (err)
 			goto err_out;
 		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
 			/* last one */
-			err = switchdev_port_vlan_dump_put(dev, &dump);
+			err = switchdev_port_vlan_dump_put(&dump);
 	}
 
 err_out:
@@ -529,7 +618,7 @@
 				  int nlflags)
 {
 	struct switchdev_attr attr = {
-		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
 	};
 	u16 mode = BRIDGE_MODE_UNDEF;
 	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
@@ -550,7 +639,7 @@
 				     unsigned long brport_flag)
 {
 	struct switchdev_attr attr = {
-		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
 	};
 	u8 flag = nla_get_u8(nlattr);
 	int err;
@@ -617,14 +706,13 @@
 static int switchdev_port_br_afspec(struct net_device *dev,
 				    struct nlattr *afspec,
 				    int (*f)(struct net_device *dev,
-					     struct switchdev_obj *obj))
+					     const struct switchdev_obj *obj))
 {
 	struct nlattr *attr;
 	struct bridge_vlan_info *vinfo;
-	struct switchdev_obj obj = {
-		.id = SWITCHDEV_OBJ_PORT_VLAN,
+	struct switchdev_obj_port_vlan vlan = {
+		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 	};
-	struct switchdev_obj_vlan *vlan = &obj.u.vlan;
 	int rem;
 	int err;
 
@@ -634,30 +722,30 @@
 		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
 			return -EINVAL;
 		vinfo = nla_data(attr);
-		vlan->flags = vinfo->flags;
+		vlan.flags = vinfo->flags;
 		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
-			if (vlan->vid_begin)
+			if (vlan.vid_begin)
 				return -EINVAL;
-			vlan->vid_begin = vinfo->vid;
+			vlan.vid_begin = vinfo->vid;
 		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
-			if (!vlan->vid_begin)
+			if (!vlan.vid_begin)
 				return -EINVAL;
-			vlan->vid_end = vinfo->vid;
-			if (vlan->vid_end <= vlan->vid_begin)
+			vlan.vid_end = vinfo->vid;
+			if (vlan.vid_end <= vlan.vid_begin)
 				return -EINVAL;
-			err = f(dev, &obj);
+			err = f(dev, &vlan.obj);
 			if (err)
 				return err;
-			memset(vlan, 0, sizeof(*vlan));
+			memset(&vlan, 0, sizeof(vlan));
 		} else {
-			if (vlan->vid_begin)
+			if (vlan.vid_begin)
 				return -EINVAL;
-			vlan->vid_begin = vinfo->vid;
-			vlan->vid_end = vinfo->vid;
-			err = f(dev, &obj);
+			vlan.vid_begin = vinfo->vid;
+			vlan.vid_end = vinfo->vid;
+			err = f(dev, &vlan.obj);
 			if (err)
 				return err;
-			memset(vlan, 0, sizeof(*vlan));
+			memset(&vlan, 0, sizeof(vlan));
 		}
 	}
 
@@ -739,15 +827,13 @@
 			   struct net_device *dev, const unsigned char *addr,
 			   u16 vid, u16 nlm_flags)
 {
-	struct switchdev_obj obj = {
-		.id = SWITCHDEV_OBJ_PORT_FDB,
-		.u.fdb = {
-			.addr = addr,
-			.vid = vid,
-		},
+	struct switchdev_obj_port_fdb fdb = {
+		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
+		.addr = addr,
+		.vid = vid,
 	};
 
-	return switchdev_port_obj_add(dev, &obj);
+	return switchdev_port_obj_add(dev, &fdb.obj);
 }
 EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
 
@@ -766,30 +852,29 @@
 			   struct net_device *dev, const unsigned char *addr,
 			   u16 vid)
 {
-	struct switchdev_obj obj = {
-		.id = SWITCHDEV_OBJ_PORT_FDB,
-		.u.fdb = {
-			.addr = addr,
-			.vid = vid,
-		},
+	struct switchdev_obj_port_fdb fdb = {
+		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
+		.addr = addr,
+		.vid = vid,
 	};
 
-	return switchdev_port_obj_del(dev, &obj);
+	return switchdev_port_obj_del(dev, &fdb.obj);
 }
 EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);
 
 struct switchdev_fdb_dump {
-	struct switchdev_obj obj;
+	struct switchdev_obj_port_fdb fdb;
+	struct net_device *dev;
 	struct sk_buff *skb;
 	struct netlink_callback *cb;
 	int idx;
 };
 
-static int switchdev_port_fdb_dump_cb(struct net_device *dev,
-				      struct switchdev_obj *obj)
+static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
 {
+	struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
 	struct switchdev_fdb_dump *dump =
-		container_of(obj, struct switchdev_fdb_dump, obj);
+		container_of(fdb, struct switchdev_fdb_dump, fdb);
 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
 	u32 seq = dump->cb->nlh->nlmsg_seq;
 	struct nlmsghdr *nlh;
@@ -809,13 +894,13 @@
 	ndm->ndm_pad2    = 0;
 	ndm->ndm_flags   = NTF_SELF;
 	ndm->ndm_type    = 0;
-	ndm->ndm_ifindex = dev->ifindex;
-	ndm->ndm_state   = obj->u.fdb.ndm_state;
+	ndm->ndm_ifindex = dump->dev->ifindex;
+	ndm->ndm_state   = fdb->ndm_state;
 
-	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr))
+	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr))
 		goto nla_put_failure;
 
-	if (obj->u.fdb.vid && nla_put_u16(dump->skb, NDA_VLAN, obj->u.fdb.vid))
+	if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid))
 		goto nla_put_failure;
 
 	nlmsg_end(dump->skb, nlh);
@@ -845,16 +930,14 @@
 			    struct net_device *filter_dev, int idx)
 {
 	struct switchdev_fdb_dump dump = {
-		.obj = {
-			.id = SWITCHDEV_OBJ_PORT_FDB,
-			.cb = switchdev_port_fdb_dump_cb,
-		},
+		.fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
+		.dev = dev,
 		.skb = skb,
 		.cb = cb,
 		.idx = idx,
 	};
 
-	switchdev_port_obj_dump(dev, &dump.obj);
+	switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb);
 	return dump.idx;
 }
 EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
@@ -885,7 +968,7 @@
 static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
 {
 	struct switchdev_attr attr = {
-		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 	};
 	struct switchdev_attr prev_attr;
 	struct net_device *dev = NULL;
@@ -932,17 +1015,15 @@
 int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
 			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
 {
-	struct switchdev_obj fib_obj = {
-		.id = SWITCHDEV_OBJ_IPV4_FIB,
-		.u.ipv4_fib = {
-			.dst = dst,
-			.dst_len = dst_len,
-			.fi = fi,
-			.tos = tos,
-			.type = type,
-			.nlflags = nlflags,
-			.tb_id = tb_id,
-		},
+	struct switchdev_obj_ipv4_fib ipv4_fib = {
+		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
+		.dst = dst,
+		.dst_len = dst_len,
+		.fi = fi,
+		.tos = tos,
+		.type = type,
+		.nlflags = nlflags,
+		.tb_id = tb_id,
 	};
 	struct net_device *dev;
 	int err = 0;
@@ -963,7 +1044,7 @@
 	if (!dev)
 		return 0;
 
-	err = switchdev_port_obj_add(dev, &fib_obj);
+	err = switchdev_port_obj_add(dev, &ipv4_fib.obj);
 	if (!err)
 		fi->fib_flags |= RTNH_F_OFFLOAD;
 
@@ -986,17 +1067,15 @@
 int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
 			   u8 tos, u8 type, u32 tb_id)
 {
-	struct switchdev_obj fib_obj = {
-		.id = SWITCHDEV_OBJ_IPV4_FIB,
-		.u.ipv4_fib = {
-			.dst = dst,
-			.dst_len = dst_len,
-			.fi = fi,
-			.tos = tos,
-			.type = type,
-			.nlflags = 0,
-			.tb_id = tb_id,
-		},
+	struct switchdev_obj_ipv4_fib ipv4_fib = {
+		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
+		.dst = dst,
+		.dst_len = dst_len,
+		.fi = fi,
+		.tos = tos,
+		.type = type,
+		.nlflags = 0,
+		.tb_id = tb_id,
 	};
 	struct net_device *dev;
 	int err = 0;
@@ -1008,7 +1087,7 @@
 	if (!dev)
 		return 0;
 
-	err = switchdev_port_obj_del(dev, &fib_obj);
+	err = switchdev_port_obj_del(dev, &ipv4_fib.obj);
 	if (!err)
 		fi->fib_flags &= ~RTNH_F_OFFLOAD;
 
@@ -1040,11 +1119,11 @@
 					  struct net_device *b)
 {
 	struct switchdev_attr a_attr = {
-		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 		.flags = SWITCHDEV_F_NO_RECURSE,
 	};
 	struct switchdev_attr b_attr = {
-		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 		.flags = SWITCHDEV_F_NO_RECURSE,
 	};
 
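As an illustration of the prepare/commit flow above: a minimal driver-side
sketch, assuming hypothetical foo_* names (not part of this series), that
reserves memory during the prepare phase via the new transaction item queue
and consumes it on commit.

#include <linux/slab.h>
#include <net/switchdev.h>

/* Hypothetical driver context; tritem lets it ride on trans->item_list. */
struct foo_vlan_ctx {
	struct switchdev_trans_item tritem;
	u16 vid;
};

static void foo_vlan_ctx_destructor(const void *data)
{
	kfree(data);	/* runs on abort, or after commit if never dequeued */
}

static int foo_port_vlan_add(struct net_device *dev,
			     const struct switchdev_obj_port_vlan *vlan,
			     struct switchdev_trans *trans)
{
	struct foo_vlan_ctx *ctx;

	if (trans->ph_prepare) {
		/* Phase I: allocate only; fail here if resources are short. */
		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;
		ctx->vid = vlan->vid_begin;
		switchdev_trans_item_enqueue(trans, ctx,
					     foo_vlan_ctx_destructor,
					     &ctx->tritem);
		return 0;
	}

	/* Phase II: commit must not fail; reuse what was prepared. */
	ctx = switchdev_trans_item_dequeue(trans);
	/* ... program the hardware for ctx->vid ... */
	kfree(ctx);
	return 0;
}
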
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 562c926..c5ac436 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -539,6 +539,7 @@
 	*err = -TIPC_ERR_NO_NAME;
 	if (skb_linearize(skb))
 		return false;
+	msg = buf_msg(skb);
 	if (msg_reroute_cnt(msg))
 		return false;
 	dnode = addr_domain(net, msg_lookup_scope(msg));
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 03ee4d3..ef31b40 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2179,8 +2179,21 @@
 			if (UNIXCB(skb).fp)
 				scm.fp = scm_fp_dup(UNIXCB(skb).fp);
 
-			sk_peek_offset_fwd(sk, chunk);
+			if (skip) {
+				sk_peek_offset_fwd(sk, chunk);
+				skip -= chunk;
+			}
 
+			if (UNIXCB(skb).fp)
+				break;
+
+			last = skb;
+			last_len = skb->len;
+			unix_state_lock(sk);
+			skb = skb_peek_next(skb, &sk->sk_receive_queue);
+			if (skb)
+				goto again;
+			unix_state_unlock(sk);
 			break;
 		}
 	} while (size);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3893409..f223026 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -419,6 +419,7 @@
 	device_initialize(&rdev->wiphy.dev);
 	rdev->wiphy.dev.class = &ieee80211_class;
 	rdev->wiphy.dev.platform_data = rdev;
+	device_enable_async_suspend(&rdev->wiphy.dev);
 
 	INIT_LIST_HEAD(&rdev->destroy_list);
 	spin_lock_init(&rdev->destroy_list_lock);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5d8748b..f05ba8b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3,6 +3,7 @@
  *
  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright 2015	Intel Deutschland GmbH
  */
 
 #include <linux/if.h>
@@ -2403,6 +2404,16 @@
 		}
 	}
 
+	if (rdev->ops->get_tx_power) {
+		int dbm, ret;
+
+		ret = rdev_get_tx_power(rdev, wdev, &dbm);
+		if (ret == 0 &&
+		    nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
+				DBM_TO_MBM(dbm)))
+			goto nla_put_failure;
+	}
+
 	if (wdev->ssid_len) {
 		if (nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
 			goto nla_put_failure;
@@ -3998,7 +4009,8 @@
 		params->sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
 	}
 
-	if (statype != CFG80211_STA_TDLS_PEER_SETUP) {
+	if (statype != CFG80211_STA_TDLS_PEER_SETUP &&
+	    statype != CFG80211_STA_AP_CLIENT_UNASSOC) {
 		/* reject other things that can't change */
 		if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD)
 			return -EINVAL;
@@ -4010,7 +4022,8 @@
 			return -EINVAL;
 	}
 
-	if (statype != CFG80211_STA_AP_CLIENT) {
+	if (statype != CFG80211_STA_AP_CLIENT &&
+	    statype != CFG80211_STA_AP_CLIENT_UNASSOC) {
 		if (params->vlan)
 			return -EINVAL;
 	}
@@ -4022,6 +4035,7 @@
 			return -EOPNOTSUPP;
 		break;
 	case CFG80211_STA_AP_CLIENT:
+	case CFG80211_STA_AP_CLIENT_UNASSOC:
 		/* accept only the listed bits */
 		if (params->sta_flags_mask &
 				~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
@@ -9938,6 +9952,9 @@
 				if (!wdev->netdev && !wdev->p2p_started)
 					return -ENETDOWN;
 			}
+
+			if (!vcmd->doit)
+				return -EOPNOTSUPP;
 		} else {
 			wdev = NULL;
 		}
@@ -9957,6 +9974,193 @@
 	return -EOPNOTSUPP;
 }
 
+static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
+				       struct netlink_callback *cb,
+				       struct cfg80211_registered_device **rdev,
+				       struct wireless_dev **wdev)
+{
+	u32 vid, subcmd;
+	unsigned int i;
+	int vcmd_idx = -1;
+	int err;
+	void *data = NULL;
+	unsigned int data_len = 0;
+
+	rtnl_lock();
+
+	if (cb->args[0]) {
+		/* subtract the 1 again here */
+		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
+		struct wireless_dev *tmp;
+
+		if (!wiphy) {
+			err = -ENODEV;
+			goto out_unlock;
+		}
+		*rdev = wiphy_to_rdev(wiphy);
+		*wdev = NULL;
+
+		if (cb->args[1]) {
+			list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
+				if (tmp->identifier == cb->args[1] - 1) {
+					*wdev = tmp;
+					break;
+				}
+			}
+		}
+
+		/* keep rtnl locked in successful case */
+		return 0;
+	}
+
+	err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
+			  nl80211_fam.attrbuf, nl80211_fam.maxattr,
+			  nl80211_policy);
+	if (err)
+		goto out_unlock;
+
+	if (!nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID] ||
+	    !nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) {
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk),
+					   nl80211_fam.attrbuf);
+	if (IS_ERR(*wdev))
+		*wdev = NULL;
+
+	*rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk),
+					   nl80211_fam.attrbuf);
+	if (IS_ERR(*rdev)) {
+		err = PTR_ERR(*rdev);
+		goto out_unlock;
+	}
+
+	vid = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID]);
+	subcmd = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
+
+	for (i = 0; i < (*rdev)->wiphy.n_vendor_commands; i++) {
+		const struct wiphy_vendor_command *vcmd;
+
+		vcmd = &(*rdev)->wiphy.vendor_commands[i];
+
+		if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
+			continue;
+
+		if (!vcmd->dumpit) {
+			err = -EOPNOTSUPP;
+			goto out_unlock;
+		}
+
+		vcmd_idx = i;
+		break;
+	}
+
+	if (vcmd_idx < 0) {
+		err = -EOPNOTSUPP;
+		goto out_unlock;
+	}
+
+	if (nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]) {
+		data = nla_data(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]);
+		data_len = nla_len(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]);
+	}
+
+	/* 0 is the first index - add 1 to parse only once */
+	cb->args[0] = (*rdev)->wiphy_idx + 1;
+	/* add 1 to know if it was NULL */
+	cb->args[1] = *wdev ? (*wdev)->identifier + 1 : 0;
+	cb->args[2] = vcmd_idx;
+	cb->args[3] = (unsigned long)data;
+	cb->args[4] = data_len;
+
+	/* keep rtnl locked in successful case */
+	return 0;
+ out_unlock:
+	rtnl_unlock();
+	return err;
+}
+
+static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
+				   struct netlink_callback *cb)
+{
+	struct cfg80211_registered_device *rdev;
+	struct wireless_dev *wdev;
+	unsigned int vcmd_idx;
+	const struct wiphy_vendor_command *vcmd;
+	void *data;
+	int data_len;
+	int err;
+	struct nlattr *vendor_data;
+
+	err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
+	if (err)
+		return err;
+
+	vcmd_idx = cb->args[2];
+	data = (void *)cb->args[3];
+	data_len = cb->args[4];
+	vcmd = &rdev->wiphy.vendor_commands[vcmd_idx];
+
+	if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
+			   WIPHY_VENDOR_CMD_NEED_NETDEV)) {
+		if (!wdev)
+			return -EINVAL;
+		if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
+		    !wdev->netdev)
+			return -EINVAL;
+
+		if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
+			if (wdev->netdev &&
+			    !netif_running(wdev->netdev))
+				return -ENETDOWN;
+			if (!wdev->netdev && !wdev->p2p_started)
+				return -ENETDOWN;
+		}
+	}
+
+	while (1) {
+		void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).portid,
+					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					   NL80211_CMD_VENDOR);
+		if (!hdr)
+			break;
+
+		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+		    (wdev && nla_put_u64(skb, NL80211_ATTR_WDEV,
+					 wdev_id(wdev)))) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		}
+
+		vendor_data = nla_nest_start(skb, NL80211_ATTR_VENDOR_DATA);
+		if (!vendor_data) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		}
+
+		err = vcmd->dumpit(&rdev->wiphy, wdev, skb, data, data_len,
+				   (unsigned long *)&cb->args[5]);
+		nla_nest_end(skb, vendor_data);
+
+		if (err == -ENOBUFS || err == -ENOENT) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		} else if (err) {
+			genlmsg_cancel(skb, hdr);
+			goto out;
+		}
+
+		genlmsg_end(skb, hdr);
+	}
+
+	err = skb->len;
+ out:
+	rtnl_unlock();
+	return err;
+}
+
 struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
 					   enum nl80211_commands cmd,
 					   enum nl80211_attrs attr,
@@ -10994,6 +11198,7 @@
 	{
 		.cmd = NL80211_CMD_VENDOR,
 		.doit = nl80211_vendor_cmd,
+		.dumpit = nl80211_vendor_cmd_dump,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_WIPHY |
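
For reference, a minimal driver-side sketch of a vendor command using the new
dumpit path, assuming hypothetical foo_* names and attributes (not from this
series); the callback signature mirrors the invocation in
nl80211_vendor_cmd_dump() above.

#include <net/cfg80211.h>

/* FOO_* constants and foo_read_stat() are made-up placeholders.
 * *storage is per-dump state (backed by cb->args[5] in nl80211).
 */
static int foo_dump_stats(struct wiphy *wiphy, struct wireless_dev *wdev,
			  struct sk_buff *skb, const void *data,
			  int data_len, unsigned long *storage)
{
	unsigned long idx = *storage;

	if (idx >= FOO_NUM_STATS)
		return -ENOENT;		/* done; nl80211 cancels the message */

	if (nla_put_u32(skb, FOO_ATTR_STAT, foo_read_stat(wiphy, idx)))
		return -ENOBUFS;	/* skb full; entry is retried next round */

	*storage = idx + 1;
	return 0;			/* keep dumping into this skb */
}

static const struct wiphy_vendor_command foo_vendor_commands[] = {
	{
		.info = { .vendor_id = FOO_OUI, .subcmd = FOO_SUBCMD_DUMP_STATS },
		.flags = WIPHY_VENDOR_CMD_NEED_WDEV,
		.dumpit = foo_dump_stats,
	},
};
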
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2510b231..7258246 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1040,8 +1040,8 @@
 	return ERR_PTR(-EINVAL);
 }
 
-const struct ieee80211_reg_rule *__freq_reg_info(struct wiphy *wiphy,
-						 u32 center_freq, u32 min_bw)
+static const struct ieee80211_reg_rule *
+__freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw)
 {
 	const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy);
 	const struct ieee80211_reg_rule *reg_rule = NULL;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index c48a4b8..cc3676e 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -136,12 +136,12 @@
 	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
 		nf_reset(skb);
 
-		err = skb_dst(skb)->ops->local_out(skb);
+		err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
 		if (unlikely(err != 1))
 			goto out;
 
 		if (!skb_dst(skb)->xfrm)
-			return dst_output(skb->sk, skb);
+			return dst_output(net, skb->sk, skb);
 
 		err = nf_hook(skb_dst(skb)->ops->family,
 			      NF_INET_POST_ROUTING, net, skb->sk, skb,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index e7f64bc..09bfcba 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1208,7 +1208,7 @@
 	}
 }
 
-static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
+static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
 						 const struct flowi *fl)
 {
 	struct xfrm_policy *pol;
@@ -1887,6 +1887,7 @@
 	struct sock *sk;
 	struct dst_entry *dst;
 	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
+	struct net *net = xp_net(pol);
 	struct xfrm_policy_queue *pq = &pol->polq;
 	struct flowi fl;
 	struct sk_buff_head list;
@@ -1903,8 +1904,7 @@
 	spin_unlock(&pq->hold_queue.lock);
 
 	dst_hold(dst->path);
-	dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
-			  sk, 0);
+	dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
 	if (IS_ERR(dst))
 		goto purge_queue;
 
@@ -1934,8 +1934,7 @@
 
 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
 		dst_hold(skb_dst(skb)->path);
-		dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
-				  &fl, skb->sk, 0);
+		dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
 		if (IS_ERR(dst)) {
 			kfree_skb(skb);
 			continue;
@@ -1945,7 +1944,7 @@
 		skb_dst_drop(skb);
 		skb_dst_set(skb, dst);
 
-		dst_output(skb->sk, skb);
+		dst_output(net, skb->sk, skb);
 	}
 
 out:
@@ -1958,7 +1957,7 @@
 	xfrm_pol_put(pol);
 }
 
-static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
+static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	unsigned long sched_next;
 	struct dst_entry *dst = skb_dst(skb);
@@ -2185,7 +2184,7 @@
  */
 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
 			      const struct flowi *fl,
-			      struct sock *sk, int flags)
+			      const struct sock *sk, int flags)
 {
 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
 	struct flow_cache_object *flo;
@@ -2333,7 +2332,7 @@
  */
 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
 				    const struct flowi *fl,
-				    struct sock *sk, int flags)
+				    const struct sock *sk, int flags)
 {
 	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
 					    flags | XFRM_LOOKUP_QUEUE |
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
index 7235e29..b7f63c70 100644
--- a/samples/bpf/libbpf.h
+++ b/samples/bpf/libbpf.h
@@ -64,6 +64,14 @@
 		.off   = 0,					\
 		.imm   = 0 })
 
+#define BPF_MOV32_REG(DST, SRC)					\
+	((struct bpf_insn) {					\
+		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
+		.dst_reg = DST,					\
+		.src_reg = SRC,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
 /* Short form of mov, dst_reg = imm32 */
 
 #define BPF_MOV64_IMM(DST, IMM)					\
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index ee0f110..563c507 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -15,20 +15,27 @@
 #include <string.h>
 #include <linux/filter.h>
 #include <stddef.h>
+#include <stdbool.h>
+#include <sys/resource.h>
 #include "libbpf.h"
 
 #define MAX_INSNS 512
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
 
+#define MAX_FIXUPS 8
+
 struct bpf_test {
 	const char *descr;
 	struct bpf_insn	insns[MAX_INSNS];
-	int fixup[32];
+	int fixup[MAX_FIXUPS];
+	int prog_array_fixup[MAX_FIXUPS];
 	const char *errstr;
+	const char *errstr_unpriv;
 	enum {
+		UNDEF,
 		ACCEPT,
 		REJECT
-	} result;
+	} result, result_unpriv;
 	enum bpf_prog_type prog_type;
 };
 
@@ -96,6 +103,7 @@
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid BPF_LD_IMM insn",
+		.errstr_unpriv = "R1 pointer comparison",
 		.result = REJECT,
 	},
 	{
@@ -109,6 +117,7 @@
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid BPF_LD_IMM insn",
+		.errstr_unpriv = "R1 pointer comparison",
 		.result = REJECT,
 	},
 	{
@@ -219,6 +228,7 @@
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "R0 !read_ok",
+		.errstr_unpriv = "R1 pointer comparison",
 		.result = REJECT,
 	},
 	{
@@ -294,7 +304,9 @@
 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "R0 leaks addr",
 		.result = ACCEPT,
+		.result_unpriv = REJECT,
 	},
 	{
 		"check corrupted spill/fill",
@@ -310,6 +322,7 @@
 
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "attempt to corrupt spilled",
 		.errstr = "corrupted spill",
 		.result = REJECT,
 	},
@@ -473,6 +486,7 @@
 		},
 		.fixup = {3},
 		.errstr = "R0 invalid mem access",
+		.errstr_unpriv = "R0 leaks addr",
 		.result = REJECT,
 	},
 	{
@@ -495,6 +509,8 @@
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "R1 pointer comparison",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
@@ -521,6 +537,8 @@
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "R1 pointer comparison",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
@@ -555,6 +573,8 @@
 			BPF_EXIT_INSN(),
 		},
 		.fixup = {24},
+		.errstr_unpriv = "R1 pointer comparison",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
@@ -603,6 +623,8 @@
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "R1 pointer comparison",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
@@ -642,6 +664,8 @@
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "R1 pointer comparison",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
@@ -699,6 +723,7 @@
 		},
 		.fixup = {4},
 		.errstr = "different pointers",
+		.errstr_unpriv = "R1 pointer comparison",
 		.result = REJECT,
 	},
 	{
@@ -720,6 +745,7 @@
 		},
 		.fixup = {6},
 		.errstr = "different pointers",
+		.errstr_unpriv = "R1 pointer comparison",
 		.result = REJECT,
 	},
 	{
@@ -742,6 +768,7 @@
 		},
 		.fixup = {7},
 		.errstr = "different pointers",
+		.errstr_unpriv = "R1 pointer comparison",
 		.result = REJECT,
 	},
 	{
@@ -752,6 +779,7 @@
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
+		.errstr_unpriv = "R1 leaks addr",
 		.result = REJECT,
 	},
 	{
@@ -762,6 +790,7 @@
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
+		.errstr_unpriv = "R1 leaks addr",
 		.result = REJECT,
 	},
 	{
@@ -772,16 +801,18 @@
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
+		.errstr_unpriv = "R1 leaks addr",
 		.result = REJECT,
 	},
 	{
 		"check out of range skb->cb access",
 		.insns = {
 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[60])),
+				    offsetof(struct __sk_buff, cb[0]) + 256),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
+		.errstr_unpriv = "",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
 	},
@@ -803,6 +834,8 @@
 			BPF_EXIT_INSN(),
 		},
 		.result = ACCEPT,
+		.errstr_unpriv = "R1 leaks addr",
+		.result_unpriv = REJECT,
 	},
 	{
 		"write skb fields from tc_cls_act prog",
@@ -819,6 +852,8 @@
 				    offsetof(struct __sk_buff, cb[3])),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
@@ -881,6 +916,270 @@
 		.result = REJECT,
 		.errstr = "invalid stack off=0 size=8",
 	},
+	{
+		"unpriv: return pointer",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 leaks addr",
+	},
+	{
+		"unpriv: add const to pointer",
+		.insns = {
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R1 pointer arithmetic",
+	},
+	{
+		"unpriv: add pointer to pointer",
+		.insns = {
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R1 pointer arithmetic",
+	},
+	{
+		"unpriv: neg pointer",
+		.insns = {
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R1 pointer arithmetic",
+	},
+	{
+		"unpriv: cmp pointer with const",
+		.insns = {
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R1 pointer comparison",
+	},
+	{
+		"unpriv: cmp pointer with pointer",
+		.insns = {
+			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R10 pointer comparison",
+	},
+	{
+		"unpriv: check that printk is disallowed",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_IMM(BPF_REG_2, 8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "unknown func 6",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: pass pointer to helper function",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup = {3},
+		.errstr_unpriv = "R4 leaks addr",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: indirectly pass pointer on stack to helper function",
+		.insns = {
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup = {3},
+		.errstr = "invalid indirect read from stack off -8+0 size 8",
+		.result = REJECT,
+	},
+	{
+		"unpriv: mangle pointer on stack 1",
+		.insns = {
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "attempt to corrupt spilled",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: mangle pointer on stack 2",
+		.insns = {
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "attempt to corrupt spilled",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: read pointer from stack in small chunks",
+		.insns = {
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid size",
+		.result = REJECT,
+	},
+	{
+		"unpriv: write pointer into ctx",
+		.insns = {
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R1 leaks addr",
+		.result_unpriv = REJECT,
+		.errstr = "invalid bpf_context access",
+		.result = REJECT,
+	},
+	{
+		"unpriv: write pointer into map elem value",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup = {3},
+		.errstr_unpriv = "R0 leaks addr",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: partial copy of pointer",
+		.insns = {
+			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R10 partial copy",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: pass pointer to tail_call",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
+			BPF_LD_MAP_FD(BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_array_fixup = {1},
+		.errstr_unpriv = "R3 leaks addr into helper",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: cmp map pointer with zero",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup = {1},
+		.errstr_unpriv = "R1 pointer comparison",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: write into frame pointer",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "frame pointer is read only",
+		.result = REJECT,
+	},
+	{
+		"unpriv: cmp of frame pointer",
+		.insns = {
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R10 pointer comparison",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: cmp of stack pointer",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R2 pointer comparison",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: obfuscate stack pointer",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R2 pointer arithmetic",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
 };
 
 static int probe_filter_length(struct bpf_insn *fp)
@@ -896,13 +1195,24 @@
 
 static int create_map(void)
 {
-	long long key, value = 0;
 	int map_fd;
 
-	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 1024);
-	if (map_fd < 0) {
+	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
+				sizeof(long long), sizeof(long long), 1024);
+	if (map_fd < 0)
 		printf("failed to create map '%s'\n", strerror(errno));
-	}
+
+	return map_fd;
+}
+
+static int create_prog_array(void)
+{
+	int map_fd;
+
+	map_fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY,
+				sizeof(int), sizeof(int), 4);
+	if (map_fd < 0)
+		printf("failed to create prog_array '%s'\n", strerror(errno));
 
 	return map_fd;
 }
@@ -910,13 +1220,17 @@
 static int test(void)
 {
 	int prog_fd, i, pass_cnt = 0, err_cnt = 0;
+	bool unpriv = geteuid() != 0;
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
 		struct bpf_insn *prog = tests[i].insns;
 		int prog_type = tests[i].prog_type;
 		int prog_len = probe_filter_length(prog);
 		int *fixup = tests[i].fixup;
-		int map_fd = -1;
+		int *prog_array_fixup = tests[i].prog_array_fixup;
+		int expected_result;
+		const char *expected_errstr;
+		int map_fd = -1, prog_array_fd = -1;
 
 		if (*fixup) {
 			map_fd = create_map();
@@ -926,13 +1240,31 @@
 				fixup++;
 			} while (*fixup);
 		}
+		if (*prog_array_fixup) {
+			prog_array_fd = create_prog_array();
+
+			do {
+				prog[*prog_array_fixup].imm = prog_array_fd;
+				prog_array_fixup++;
+			} while (*prog_array_fixup);
+		}
 		printf("#%d %s ", i, tests[i].descr);
 
 		prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
 					prog, prog_len * sizeof(struct bpf_insn),
 					"GPL", 0);
 
-		if (tests[i].result == ACCEPT) {
+		if (unpriv && tests[i].result_unpriv != UNDEF)
+			expected_result = tests[i].result_unpriv;
+		else
+			expected_result = tests[i].result;
+
+		if (unpriv && tests[i].errstr_unpriv)
+			expected_errstr = tests[i].errstr_unpriv;
+		else
+			expected_errstr = tests[i].errstr;
+
+		if (expected_result == ACCEPT) {
 			if (prog_fd < 0) {
 				printf("FAIL\nfailed to load prog '%s'\n",
 				       strerror(errno));
@@ -947,7 +1279,7 @@
 				err_cnt++;
 				goto fail;
 			}
-			if (strstr(bpf_log_buf, tests[i].errstr) == 0) {
+			if (strstr(bpf_log_buf, expected_errstr) == 0) {
 				printf("FAIL\nunexpected error message: %s",
 				       bpf_log_buf);
 				err_cnt++;
@@ -960,6 +1292,8 @@
 fail:
 		if (map_fd >= 0)
 			close(map_fd);
+		if (prog_array_fd >= 0)
+			close(prog_array_fd);
 		close(prog_fd);
 
 	}
@@ -970,5 +1304,8 @@
 
 int main(void)
 {
+	struct rlimit r = {1 << 20, 1 << 20};
+
+	setrlimit(RLIMIT_MEMLOCK, &r);
 	return test();
 }
diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c
index 9119ac6..c285a3b 100644
--- a/samples/kprobes/jprobe_example.c
+++ b/samples/kprobes/jprobe_example.c
@@ -1,13 +1,13 @@
 /*
  * Here's a sample kernel module showing the use of jprobes to dump
- * the arguments of do_fork().
+ * the arguments of _do_fork().
  *
  * For more information on theory of operation of jprobes, see
  * Documentation/kprobes.txt
  *
  * Build and insert the kernel module as done in the kprobe example.
  * You will see the trace data in /var/log/messages and on the
- * console whenever do_fork() is invoked to create a new process.
+ * console whenever _do_fork() is invoked to create a new process.
  * (Some messages may be suppressed if syslogd is configured to
  * eliminate duplicate messages.)
  */
@@ -17,13 +17,13 @@
 #include <linux/kprobes.h>
 
 /*
- * Jumper probe for do_fork.
+ * Jumper probe for _do_fork.
  * Mirror principle enables access to arguments of the probed routine
  * from the probe handler.
  */
 
-/* Proxy routine having the same arguments as actual do_fork() routine */
-static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
+/* Proxy routine having the same arguments as actual _do_fork() routine */
+static long j_do_fork(unsigned long clone_flags, unsigned long stack_start,
 	      unsigned long stack_size, int __user *parent_tidptr,
 	      int __user *child_tidptr)
 {
@@ -36,9 +36,9 @@
 }
 
 static struct jprobe my_jprobe = {
-	.entry			= jdo_fork,
+	.entry			= j_do_fork,
 	.kp = {
-		.symbol_name	= "do_fork",
+		.symbol_name	= "_do_fork",
 	},
 };
 
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c
index 366db1a..727eb21 100644
--- a/samples/kprobes/kprobe_example.c
+++ b/samples/kprobes/kprobe_example.c
@@ -1,13 +1,13 @@
 /*
  * NOTE: This example is works on x86 and powerpc.
  * Here's a sample kernel module showing the use of kprobes to dump a
- * stack trace and selected registers when do_fork() is called.
+ * stack trace and selected registers when _do_fork() is called.
  *
  * For more information on theory of operation of kprobes, see
  * Documentation/kprobes.txt
  *
  * You will see the trace data in /var/log/messages and on the console
- * whenever do_fork() is invoked to create a new process.
+ * whenever _do_fork() is invoked to create a new process.
  */
 
 #include <linux/kernel.h>
@@ -16,7 +16,7 @@
 
 /* For each probe you need to allocate a kprobe structure */
 static struct kprobe kp = {
-	.symbol_name	= "do_fork",
+	.symbol_name	= "_do_fork",
 };
 
 /* kprobe pre_handler: called just before the probed instruction is executed */
diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c
index 1041b67..ebb1d1a 100644
--- a/samples/kprobes/kretprobe_example.c
+++ b/samples/kprobes/kretprobe_example.c
@@ -7,7 +7,7 @@
  *
  * usage: insmod kretprobe_example.ko func=<func_name>
  *
- * If no func_name is specified, do_fork is instrumented
+ * If no func_name is specified, _do_fork is instrumented
  *
  * For more information on theory of operation of kretprobes, see
  * Documentation/kprobes.txt
@@ -25,7 +25,7 @@
 #include <linux/limits.h>
 #include <linux/sched.h>
 
-static char func_name[NAME_MAX] = "do_fork";
+static char func_name[NAME_MAX] = "_do_fork";
 module_param_string(func, func_name, NAME_MAX, S_IRUGO);
 MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the"
 			" function's execution time");
diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c
index fd0db01..b071bf4 100644
--- a/scripts/extract-cert.c
+++ b/scripts/extract-cert.c
@@ -1,15 +1,15 @@
 /* Extract X.509 certificate in DER form from PKCS#11 or PEM.
  *
- * Copyright © 2014 Red Hat, Inc. All Rights Reserved.
- * Copyright © 2015 Intel Corporation.
+ * Copyright © 2014-2015 Red Hat, Inc. All Rights Reserved.
+ * Copyright © 2015      Intel Corporation.
  *
  * Authors: David Howells <dhowells@redhat.com>
  *          David Woodhouse <dwmw2@infradead.org>
  *
  * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the licence, or (at your option) any later version.
  */
 #define _GNU_SOURCE
 #include <stdio.h>
@@ -17,13 +17,9 @@
 #include <stdint.h>
 #include <stdbool.h>
 #include <string.h>
-#include <getopt.h>
 #include <err.h>
-#include <arpa/inet.h>
 #include <openssl/bio.h>
-#include <openssl/evp.h>
 #include <openssl/pem.h>
-#include <openssl/pkcs7.h>
 #include <openssl/err.h>
 #include <openssl/engine.h>
 
@@ -86,7 +82,7 @@
 		ERR(!wb, "%s", cert_dst);
 	}
 	X509_NAME_oneline(X509_get_subject_name(x509), buf, sizeof(buf));
-	ERR(!i2d_X509_bio(wb, x509), cert_dst);
+	ERR(!i2d_X509_bio(wb, x509), "%s", cert_dst);
 	if (kbuild_verbose)
 		fprintf(stderr, "Extracted cert: %s\n", buf);
 }
diff --git a/scripts/sign-file.c b/scripts/sign-file.c
index 058bba3..250a7a6 100755
--- a/scripts/sign-file.c
+++ b/scripts/sign-file.c
@@ -1,12 +1,15 @@
 /* Sign a module file using the given key.
  *
- * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
+ * Copyright © 2014-2015 Red Hat, Inc. All Rights Reserved.
+ * Copyright © 2015      Intel Corporation.
+ *
+ * Authors: David Howells <dhowells@redhat.com>
+ *          David Woodhouse <dwmw2@infradead.org>
  *
  * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the licence, or (at your option) any later version.
  */
 #define _GNU_SOURCE
 #include <stdio.h>
@@ -17,13 +20,34 @@
 #include <getopt.h>
 #include <err.h>
 #include <arpa/inet.h>
+#include <openssl/opensslv.h>
 #include <openssl/bio.h>
 #include <openssl/evp.h>
 #include <openssl/pem.h>
-#include <openssl/cms.h>
 #include <openssl/err.h>
 #include <openssl/engine.h>
 
+/*
+ * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to
+ * assume that it's not available and its header file is missing and that we
+ * should use PKCS#7 instead.  Switching to the older PKCS#7 format restricts
+ * the options we have on specifying the X.509 certificate we want.
+ *
+ * Further, older versions of OpenSSL don't support manually adding signers to
+ * the PKCS#7 message, so we have to accept that we get a certificate included in
+ * the signature message.  Nor do such older versions of OpenSSL support
+ * signing with anything other than SHA1 - so we're stuck with that if such is
+ * the case.
+ */
+#if OPENSSL_VERSION_NUMBER < 0x10000000L
+#define USE_PKCS7
+#endif
+#ifndef USE_PKCS7
+#include <openssl/cms.h>
+#else
+#include <openssl/pkcs7.h>
+#endif
+
 struct module_signature {
 	uint8_t		algo;		/* Public-key crypto algorithm [0] */
 	uint8_t		hash;		/* Digest algorithm [0] */
@@ -107,30 +131,42 @@
 	struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 };
 	char *hash_algo = NULL;
 	char *private_key_name, *x509_name, *module_name, *dest_name;
-	bool save_cms = false, replace_orig;
+	bool save_sig = false, replace_orig;
 	bool sign_only = false;
 	unsigned char buf[4096];
-	unsigned long module_size, cms_size;
-	unsigned int use_keyid = 0, use_signed_attrs = CMS_NOATTR;
+	unsigned long module_size, sig_size;
+	unsigned int use_signed_attrs;
 	const EVP_MD *digest_algo;
 	EVP_PKEY *private_key;
+#ifndef USE_PKCS7
 	CMS_ContentInfo *cms;
+	unsigned int use_keyid = 0;
+#else
+	PKCS7 *pkcs7;
+#endif
 	X509 *x509;
 	BIO *b, *bd = NULL, *bm;
 	int opt, n;
-
 	OpenSSL_add_all_algorithms();
 	ERR_load_crypto_strings();
 	ERR_clear_error();
 
 	key_pass = getenv("KBUILD_SIGN_PIN");
 
+#ifndef USE_PKCS7
+	use_signed_attrs = CMS_NOATTR;
+#else
+	use_signed_attrs = PKCS7_NOATTR;
+#endif
+
 	do {
 		opt = getopt(argc, argv, "dpk");
 		switch (opt) {
-		case 'p': save_cms = true; break;
-		case 'd': sign_only = true; save_cms = true; break;
+		case 'p': save_sig = true; break;
+		case 'd': sign_only = true; save_sig = true; break;
+#ifndef USE_PKCS7
 		case 'k': use_keyid = CMS_USE_KEYID; break;
+#endif
 		case -1: break;
 		default: format();
 		}
@@ -154,6 +190,14 @@
 		replace_orig = true;
 	}
 
+#ifdef USE_PKCS7
+	if (strcmp(hash_algo, "sha1") != 0) {
+		fprintf(stderr, "sign-file: %s only supports SHA1 signing\n",
+			OPENSSL_VERSION_TEXT);
+		exit(3);
+	}
+#endif
+
 	/* Read the private key and the X.509 cert the PKCS#7 message
 	 * will point to.
 	 */
@@ -210,7 +254,8 @@
 	bm = BIO_new_file(module_name, "rb");
 	ERR(!bm, "%s", module_name);
 
-	/* Load the CMS message from the digest buffer. */
+#ifndef USE_PKCS7
+	/* Load the signature message from the digest buffer. */
 	cms = CMS_sign(NULL, NULL, NULL, NULL,
 		       CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM);
 	ERR(!cms, "CMS_sign");
@@ -218,17 +263,31 @@
 	ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo,
 			     CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP |
 			     use_keyid | use_signed_attrs),
-	    "CMS_sign_add_signer");
+	    "CMS_add1_signer");
 	ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0,
 	    "CMS_final");
 
-	if (save_cms) {
-		char *cms_name;
+#else
+	pkcs7 = PKCS7_sign(x509, private_key, NULL, bm,
+			   PKCS7_NOCERTS | PKCS7_BINARY |
+			   PKCS7_DETACHED | use_signed_attrs);
+	ERR(!pkcs7, "PKCS7_sign");
+#endif
 
-		ERR(asprintf(&cms_name, "%s.p7s", module_name) < 0, "asprintf");
-		b = BIO_new_file(cms_name, "wb");
-		ERR(!b, "%s", cms_name);
-		ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, "%s", cms_name);
+	if (save_sig) {
+		char *sig_file_name;
+
+		ERR(asprintf(&sig_file_name, "%s.p7s", module_name) < 0,
+		    "asprintf");
+		b = BIO_new_file(sig_file_name, "wb");
+		ERR(!b, "%s", sig_file_name);
+#ifndef USE_PKCS7
+		ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0,
+		    "%s", sig_file_name);
+#else
+		ERR(i2d_PKCS7_bio(b, pkcs7) < 0,
+			"%s", sig_file_name);
+#endif
 		BIO_free(b);
 	}
 
@@ -244,9 +303,13 @@
 	ERR(n < 0, "%s", module_name);
 	module_size = BIO_number_written(bd);
 
+#ifndef USE_PKCS7
 	ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name);
-	cms_size = BIO_number_written(bd) - module_size;
-	sig_info.sig_len = htonl(cms_size);
+#else
+	ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name);
+#endif
+	sig_size = BIO_number_written(bd) - module_size;
+	sig_info.sig_len = htonl(sig_size);
 	ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name);
 	ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name);
 
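(The CMS-vs-PKCS#7 decision above is made entirely at compile time from OPENSSL_VERSION_NUMBER. A small stand-alone sketch that reports which backend sign-file would be built with against the local OpenSSL, assuming only <openssl/opensslv.h>.)

#include <stdio.h>
#include <openssl/opensslv.h>

int main(void)
{
#if OPENSSL_VERSION_NUMBER < 0x10000000L
	/* pre-1.0.0: PKCS#7 fallback, SHA1-only signing */
	printf("%s: PKCS#7 fallback\n", OPENSSL_VERSION_TEXT);
#else
	/* 1.0.0 or newer: CMS signing via CMS_add1_signer() */
	printf("%s: CMS signing\n", OPENSSL_VERSION_TEXT);
#endif
	return 0;
}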
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 73455089..03c1652 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -401,7 +401,7 @@
 	bool match = false;
 
 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
-			 lockdep_is_held(&devcgroup_mutex),
+			 !lockdep_is_held(&devcgroup_mutex),
 			 "device_cgroup:verify_new_ex called without proper synchronization");
 
 	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
diff --git a/security/keys/gc.c b/security/keys/gc.c
index c795237..39eac1f 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -134,6 +134,10 @@
 		kdebug("- %u", key->serial);
 		key_check(key);
 
+		/* Throw away the key data */
+		if (key->type->destroy)
+			key->type->destroy(key);
+
 		security_key_free(key);
 
 		/* deal with the user's key tracking and quota */
@@ -148,10 +152,6 @@
 		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
 			atomic_dec(&key->user->nikeys);
 
-		/* now throw away the key memory */
-		if (key->type->destroy)
-			key->type->destroy(key);
-
 		key_user_put(key->user);
 
 		kfree(key->description);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index e4369d8..6e50841 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4866,7 +4866,7 @@
 	return NF_ACCEPT;
 }
 
-static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops,
+static unsigned int selinux_ipv4_forward(void *priv,
 					 struct sk_buff *skb,
 					 const struct nf_hook_state *state)
 {
@@ -4874,7 +4874,7 @@
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
+static unsigned int selinux_ipv6_forward(void *priv,
 					 struct sk_buff *skb,
 					 const struct nf_hook_state *state)
 {
@@ -4898,7 +4898,7 @@
 	if (sk) {
 		struct sk_security_struct *sksec;
 
-		if (sk->sk_state == TCP_LISTEN)
+		if (sk_listener(sk))
 			/* if the socket is the listening state then this
 			 * packet is a SYN-ACK packet which means it needs to
 			 * be labeled based on the connection/request_sock and
@@ -4924,7 +4924,7 @@
 	return NF_ACCEPT;
 }
 
-static unsigned int selinux_ipv4_output(const struct nf_hook_ops *ops,
+static unsigned int selinux_ipv4_output(void *priv,
 					struct sk_buff *skb,
 					const struct nf_hook_state *state)
 {
@@ -5005,7 +5005,7 @@
 	 *       unfortunately, this means more work, but it is only once per
 	 *       connection. */
 	if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
-	    !(sk != NULL && sk->sk_state == TCP_LISTEN))
+	    !(sk && sk_listener(sk)))
 		return NF_ACCEPT;
 #endif
 
@@ -5022,7 +5022,7 @@
 			secmark_perm = PACKET__SEND;
 			peer_sid = SECINITSID_KERNEL;
 		}
-	} else if (sk->sk_state == TCP_LISTEN) {
+	} else if (sk_listener(sk)) {
 		/* Locally generated packet but the associated socket is in the
 		 * listening state which means this is a SYN-ACK packet.  In
 		 * this particular case the correct security label is assigned
@@ -5033,7 +5033,11 @@
 		 * selinux_inet_conn_request().  See also selinux_ip_output()
 		 * for similar problems. */
 		u32 skb_sid;
-		struct sk_security_struct *sksec = sk->sk_security;
+		struct sk_security_struct *sksec;
+
+		if (sk->sk_state == TCP_NEW_SYN_RECV)
+			sk = inet_reqsk(sk)->rsk_listener;
+		sksec = sk->sk_security;
 		if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
 			return NF_DROP;
 		/* At this point, if the returned skb peerlbl is SECSID_NULL
@@ -5099,7 +5103,7 @@
 	return NF_ACCEPT;
 }
 
-static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops,
+static unsigned int selinux_ipv4_postroute(void *priv,
 					   struct sk_buff *skb,
 					   const struct nf_hook_state *state)
 {
@@ -5107,7 +5111,7 @@
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops,
+static unsigned int selinux_ipv6_postroute(void *priv,
 					   struct sk_buff *skb,
 					   const struct nf_hook_state *state)
 {
diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c
index a455cfc..a9e41da 100644
--- a/security/smack/smack_netfilter.c
+++ b/security/smack/smack_netfilter.c
@@ -21,7 +21,7 @@
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 
-static unsigned int smack_ipv6_output(const struct nf_hook_ops *ops,
+static unsigned int smack_ipv6_output(void *priv,
 					struct sk_buff *skb,
 					const struct nf_hook_state *state)
 {
@@ -38,7 +38,7 @@
 }
 #endif	/* IPV6 */
 
-static unsigned int smack_ipv4_output(const struct nf_hook_ops *ops,
+static unsigned int smack_ipv4_output(void *priv,
 					struct sk_buff *skb,
 					const struct nf_hook_state *state)
 {
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
index 885683a..e040621 100644
--- a/sound/arm/Kconfig
+++ b/sound/arm/Kconfig
@@ -9,6 +9,14 @@
 	  Drivers that are implemented on ASoC can be found in
 	  "ALSA for SoC audio support" section.
 
+config SND_PXA2XX_LIB
+	tristate
+	select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
+	select SND_DMAENGINE_PCM
+
+config SND_PXA2XX_LIB_AC97
+	bool
+
 if SND_ARM
 
 config SND_ARMAACI
@@ -21,13 +29,6 @@
 	tristate
 	select SND_PCM
 
-config SND_PXA2XX_LIB
-	tristate
-	select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
-
-config SND_PXA2XX_LIB_AC97
-	bool
-
 config SND_PXA2XX_AC97
 	tristate "AC97 driver for the Intel PXA2xx chip"
 	depends on ARCH_PXA
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 477742c..58c0aad 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -73,6 +73,7 @@
 	struct clk *hda2codec_2x_clk;
 	struct clk *hda2hdmi_clk;
 	void __iomem *regs;
+	struct work_struct probe_work;
 };
 
 #ifdef CONFIG_PM
@@ -294,7 +295,9 @@
 static int hda_tegra_dev_free(struct snd_device *device)
 {
 	struct azx *chip = device->device_data;
+	struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
 
+	cancel_work_sync(&hda->probe_work);
 	if (azx_bus(chip)->chip_init) {
 		azx_stop_all_streams(chip);
 		azx_stop_chip(chip);
@@ -426,6 +429,9 @@
 /*
  * constructor
  */
+
+static void hda_tegra_probe_work(struct work_struct *work);
+
 static int hda_tegra_create(struct snd_card *card,
 			    unsigned int driver_caps,
 			    struct hda_tegra *hda)
@@ -452,6 +458,8 @@
 	chip->single_cmd = false;
 	chip->snoop = true;
 
+	INIT_WORK(&hda->probe_work, hda_tegra_probe_work);
+
 	err = azx_bus_init(chip, NULL, &hda_tegra_io_ops);
 	if (err < 0)
 		return err;
@@ -499,6 +507,21 @@
 	card->private_data = chip;
 
 	dev_set_drvdata(&pdev->dev, card);
+	schedule_work(&hda->probe_work);
+
+	return 0;
+
+out_free:
+	snd_card_free(card);
+	return err;
+}
+
+static void hda_tegra_probe_work(struct work_struct *work)
+{
+	struct hda_tegra *hda = container_of(work, struct hda_tegra, probe_work);
+	struct azx *chip = &hda->chip;
+	struct platform_device *pdev = to_platform_device(hda->dev);
+	int err;
 
 	err = hda_tegra_first_init(chip, pdev);
 	if (err < 0)
@@ -520,11 +543,8 @@
 	chip->running = 1;
 	snd_hda_set_power_save(&chip->bus, power_save * 1000);
 
-	return 0;
-
-out_free:
-	snd_card_free(card);
-	return err;
+ out_free:
+	return; /* no error return from async probe */
 }
 
 static int hda_tegra_remove(struct platform_device *pdev)
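(The Tegra HDA change defers the slow codec initialisation to a work item so probe() can return quickly, and the free path must cancel or flush that work before tearing anything down. A generic sketch of the pattern with illustrative names, my_dev and my_probe_work, rather than the driver's own.)

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct probe_work;
	/* ... device state ... */
};

/* Runs from the system workqueue, off the probe() path. */
static void my_probe_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, probe_work);

	/* slow initialisation goes here */
	(void)dev;
}

static int my_probe(struct my_dev *dev)
{
	INIT_WORK(&dev->probe_work, my_probe_work);
	schedule_work(&dev->probe_work);	/* probe() returns immediately */
	return 0;
}

static void my_remove(struct my_dev *dev)
{
	/* Make sure the async init has finished (or never runs) first. */
	cancel_work_sync(&dev->probe_work);
}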
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a75b561..afec6dc 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4188,6 +4188,24 @@
 	}
 }
 
+/* fixup for Thinkpad docks: add dock pins, avoid HP parser fixup */
+static void alc_fixup_tpt440_dock(struct hda_codec *codec,
+				  const struct hda_fixup *fix, int action)
+{
+	static const struct hda_pintbl pincfgs[] = {
+		{ 0x16, 0x21211010 }, /* dock headphone */
+		{ 0x19, 0x21a11010 }, /* dock mic */
+		{ }
+	};
+	struct alc_spec *spec = codec->spec;
+
+	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+		codec->power_save_node = 0; /* avoid click noises */
+		snd_hda_apply_pincfgs(codec, pincfgs);
+	}
+}
+
 static void alc_shutup_dell_xps13(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
@@ -4562,7 +4580,6 @@
 	ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
 	ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
 	ALC292_FIXUP_TPT440_DOCK,
-	ALC292_FIXUP_TPT440_DOCK2,
 	ALC283_FIXUP_BXBT2807_MIC,
 	ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
 	ALC282_FIXUP_ASPIRE_V5_PINS,
@@ -5029,17 +5046,7 @@
 	},
 	[ALC292_FIXUP_TPT440_DOCK] = {
 		.type = HDA_FIXUP_FUNC,
-		.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
-		.chained = true,
-		.chain_id = ALC292_FIXUP_TPT440_DOCK2
-	},
-	[ALC292_FIXUP_TPT440_DOCK2] = {
-		.type = HDA_FIXUP_PINS,
-		.v.pins = (const struct hda_pintbl[]) {
-			{ 0x16, 0x21211010 }, /* dock headphone */
-			{ 0x19, 0x21a11010 }, /* dock mic */
-			{ }
-		},
+		.v.func = alc_fixup_tpt440_dock,
 		.chained = true,
 		.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
 	},
diff --git a/sound/soc/au1x/psc-i2s.c b/sound/soc/au1x/psc-i2s.c
index 38e853a..0bf9d62 100644
--- a/sound/soc/au1x/psc-i2s.c
+++ b/sound/soc/au1x/psc-i2s.c
@@ -296,7 +296,6 @@
 {
 	struct resource *iores, *dmares;
 	unsigned long sel;
-	int ret;
 	struct au1xpsc_audio_data *wd;
 
 	wd = devm_kzalloc(&pdev->dev, sizeof(struct au1xpsc_audio_data),
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 4972bf3..268a28b 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -732,14 +732,14 @@
 static const struct snd_kcontrol_new rt5645_dac_l_mix[] = {
 	SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER,
 			RT5645_M_ADCMIX_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC1 Switch", RT5645_AD_DA_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC1 Switch", RT5645_AD_DA_MIXER,
 			RT5645_M_DAC1_L_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5645_dac_r_mix[] = {
 	SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER,
 			RT5645_M_ADCMIX_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC1 Switch", RT5645_AD_DA_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC1 Switch", RT5645_AD_DA_MIXER,
 			RT5645_M_DAC1_R_SFT, 1, 1),
 };
 
@@ -1381,7 +1381,7 @@
 				regmap_write(rt5645->regmap, RT5645_PR_BASE +
 					RT5645_MAMP_INT_REG2, 0xfc00);
 				snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140);
-				mdelay(5);
+				msleep(40);
 				rt5645->hp_on = true;
 			} else {
 				/* depop parameters */
@@ -2829,13 +2829,12 @@
 			snd_soc_dapm_sync(dapm);
 			rt5645->jack_type = SND_JACK_HEADPHONE;
 		}
-
-		snd_soc_update_bits(codec, RT5645_CHARGE_PUMP, 0x0300, 0x0200);
-		snd_soc_write(codec, RT5645_DEPOP_M1, 0x001d);
-		snd_soc_write(codec, RT5645_DEPOP_M1, 0x0001);
 	} else { /* jack out */
 		rt5645->jack_type = 0;
 
+		regmap_update_bits(rt5645->regmap, RT5645_HP_VOL,
+			RT5645_L_MUTE | RT5645_R_MUTE,
+			RT5645_L_MUTE | RT5645_R_MUTE);
 		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
 			RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD);
 		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
@@ -2880,8 +2879,6 @@
 		rt5645->en_button_func = true;
 		regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
 				RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ);
-		regmap_update_bits(rt5645->regmap, RT5645_DEPOP_M1,
-				RT5645_HP_CB_MASK, RT5645_HP_CB_PU);
 		regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1,
 				RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL);
 	}
@@ -3205,6 +3202,13 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
 		},
 	},
+	{
+		.ident = "Google Ultima",
+		.callback = strago_quirk_cb,
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Ultima"),
+		},
+	},
 	{ }
 };
 
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index f2c6ad4..581ec15 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -577,7 +577,6 @@
 	struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
 	unsigned long flags;
 	int ret;
-	const struct firmware *fw;
 	struct spi_message m;
 	struct spi_transfer t;
 	struct dfw_pllrec pll_rec;
@@ -623,14 +622,6 @@
 	wm0010->state = WM0010_OUT_OF_RESET;
 	spin_unlock_irqrestore(&wm0010->irq_lock, flags);
 
-	/* First the bootloader */
-	ret = request_firmware(&fw, "wm0010_stage2.bin", codec->dev);
-	if (ret != 0) {
-		dev_err(codec->dev, "Failed to request stage2 loader: %d\n",
-			ret);
-		goto abort;
-	}
-
 	if (!wait_for_completion_timeout(&wm0010->boot_completion,
 					 msecs_to_jiffies(20)))
 		dev_err(codec->dev, "Failed to get interrupt from DSP\n");
@@ -673,7 +664,7 @@
 
 		img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA);
 		if (!img_swap)
-			goto abort;
+			goto abort_out;
 
 		/* We need to re-order for 0010 */
 		byte_swap_64((u64 *)&pll_rec, img_swap, len);
@@ -688,16 +679,16 @@
 		spi_message_add_tail(&t, &m);
 
 		ret = spi_sync(spi, &m);
-		if (ret != 0) {
+		if (ret) {
 			dev_err(codec->dev, "First PLL write failed: %d\n", ret);
-			goto abort;
+			goto abort_swap;
 		}
 
 		/* Use a second send of the message to get the return status */
 		ret = spi_sync(spi, &m);
-		if (ret != 0) {
+		if (ret) {
 			dev_err(codec->dev, "Second PLL write failed: %d\n", ret);
-			goto abort;
+			goto abort_swap;
 		}
 
 		p = (u32 *)out;
@@ -730,6 +721,10 @@
 
 	return 0;
 
+abort_swap:
+	kfree(img_swap);
+abort_out:
+	kfree(out);
 abort:
 	/* Put the chip back into reset */
 	wm0010_halt(codec);
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index e3b7d0c..dbd8840 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -211,28 +211,38 @@
 	return wm8960_set_deemph(codec);
 }
 
-static const DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 50, 0);
-static const DECLARE_TLV_DB_SCALE(dac_tlv, -12700, 50, 1);
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1725, 75, 0);
+static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
 static const DECLARE_TLV_DB_SCALE(bypass_tlv, -2100, 300, 0);
 static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
-static const DECLARE_TLV_DB_SCALE(boost_tlv, -1200, 300, 1);
+static const DECLARE_TLV_DB_SCALE(lineinboost_tlv, -1500, 300, 1);
+static const unsigned int micboost_tlv[] = {
+	TLV_DB_RANGE_HEAD(2),
+	0, 1, TLV_DB_SCALE_ITEM(0, 1300, 0),
+	2, 3, TLV_DB_SCALE_ITEM(2000, 900, 0),
+};
 
 static const struct snd_kcontrol_new wm8960_snd_controls[] = {
 SOC_DOUBLE_R_TLV("Capture Volume", WM8960_LINVOL, WM8960_RINVOL,
-		 0, 63, 0, adc_tlv),
+		 0, 63, 0, inpga_tlv),
 SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL,
 	6, 1, 0),
 SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL,
 	7, 1, 0),
 
 SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
-	       WM8960_INBMIX1, 4, 7, 0, boost_tlv),
+	       WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv),
 SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume",
-	       WM8960_INBMIX1, 1, 7, 0, boost_tlv),
+	       WM8960_INBMIX1, 1, 7, 0, lineinboost_tlv),
 SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume",
-	       WM8960_INBMIX2, 4, 7, 0, boost_tlv),
+	       WM8960_INBMIX2, 4, 7, 0, lineinboost_tlv),
 SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume",
-	       WM8960_INBMIX2, 1, 7, 0, boost_tlv),
+	       WM8960_INBMIX2, 1, 7, 0, lineinboost_tlv),
+SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT1 Volume",
+		WM8960_RINPATH, 4, 3, 0, micboost_tlv),
+SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT1 Volume",
+		WM8960_LINPATH, 4, 3, 0, micboost_tlv),
 
 SOC_DOUBLE_R_TLV("Playback Volume", WM8960_LDAC, WM8960_RDAC,
 		 0, 255, 0, dac_tlv),
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index b4eb975..293e47a 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2944,7 +2944,8 @@
 				   WM8962_DAC_MUTE, val);
 }
 
-#define WM8962_RATES SNDRV_PCM_RATE_8000_96000
+#define WM8962_RATES (SNDRV_PCM_RATE_8000_48000 |\
+		SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
 
 #define WM8962_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index add6bb9..7d45d98 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -663,7 +663,7 @@
 	u8 rx_ser = 0;
 	u8 slots = mcasp->tdm_slots;
 	u8 max_active_serializers = (channels + slots - 1) / slots;
-	int active_serializers, numevt, n;
+	int active_serializers, numevt;
 	u32 reg;
 	/* Default configuration */
 	if (mcasp->version < MCASP_VERSION_3)
@@ -745,9 +745,8 @@
 	 * The number of words for numevt need to be in steps of active
 	 * serializers.
 	 */
-	n = numevt % active_serializers;
-	if (n)
-		numevt += (active_serializers - n);
+	numevt = (numevt / active_serializers) * active_serializers;
+
 	while (period_words % numevt && numevt > 0)
 		numevt -= active_serializers;
 	if (numevt <= 0)
@@ -1299,6 +1298,7 @@
 		.ops 		= &davinci_mcasp_dai_ops,
 
 		.symmetric_samplebits	= 1,
+		.symmetric_rates	= 1,
 	},
 	{
 		.name		= "davinci-mcasp.1",
@@ -1685,7 +1685,7 @@
 
 	irq = platform_get_irq_byname(pdev, "common");
 	if (irq >= 0) {
-		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common\n",
+		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common",
 					  dev_name(&pdev->dev));
 		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 						davinci_mcasp_common_irq_handler,
@@ -1702,7 +1702,7 @@
 
 	irq = platform_get_irq_byname(pdev, "rx");
 	if (irq >= 0) {
-		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx\n",
+		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx",
 					  dev_name(&pdev->dev));
 		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 						davinci_mcasp_rx_irq_handler,
@@ -1717,7 +1717,7 @@
 
 	irq = platform_get_irq_byname(pdev, "tx");
 	if (irq >= 0) {
-		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx\n",
+		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx",
 					  dev_name(&pdev->dev));
 		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 						davinci_mcasp_tx_irq_handler,
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 5aeb6ed..96f55ae 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -488,7 +488,8 @@
 		priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
 	} else {
 		dev_err(&pdev->dev, "unknown Device Tree compatible\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto asrc_fail;
 	}
 
 	/* Common settings for corresponding Freescale CPU DAI driver */
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 8ec6fb2..37c5cd4 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -249,7 +249,8 @@
 
 static bool fsl_ssi_is_ac97(struct fsl_ssi_private *ssi_private)
 {
-	return !!(ssi_private->dai_fmt & SND_SOC_DAIFMT_AC97);
+	return (ssi_private->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
+		SND_SOC_DAIFMT_AC97;
 }
 
 static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private)
@@ -947,7 +948,7 @@
 				CCSR_SSI_SCR_TCH_EN);
 	}
 
-	if (fmt & SND_SOC_DAIFMT_AC97)
+	if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_AC97)
 		fsl_ssi_setup_ac97(ssi_private);
 
 	return 0;
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index f6efa9d..b27f25f 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -302,6 +302,10 @@
 	struct sst_hsw_ipc_dx_reply dx;
 	void *dx_context;
 	dma_addr_t dx_context_paddr;
+	enum sst_hsw_device_id dx_dev;
+	enum sst_hsw_device_mclk dx_mclk;
+	enum sst_hsw_device_mode dx_mode;
+	u32 dx_clock_divider;
 
 	/* boot */
 	wait_queue_head_t boot_wait;
@@ -1400,10 +1404,10 @@
 
 	trace_ipc_request("set device config", dev);
 
-	config.ssp_interface = dev;
-	config.clock_frequency = mclk;
-	config.mode = mode;
-	config.clock_divider = clock_divider;
+	hsw->dx_dev = config.ssp_interface = dev;
+	hsw->dx_mclk = config.clock_frequency = mclk;
+	hsw->dx_mode = config.mode = mode;
+	hsw->dx_clock_divider = config.clock_divider = clock_divider;
 	if (mode == SST_HSW_DEVICE_TDM_CLOCK_MASTER)
 		config.channels = 4;
 	else
@@ -1704,10 +1708,10 @@
 		return -EIO;
 	}
 
-	/* Set ADSP SSP port settings */
-	ret = sst_hsw_device_set_config(hsw, SST_HSW_DEVICE_SSP_0,
-					SST_HSW_DEVICE_MCLK_FREQ_24_MHZ,
-					SST_HSW_DEVICE_CLOCK_MASTER, 9);
+	/* Set ADSP SSP port settings - sadly the FW does not store SSP port
+	   settings as part of the PM context. */
+	ret = sst_hsw_device_set_config(hsw, hsw->dx_dev, hsw->dx_mclk,
+					hsw->dx_mode, hsw->dx_clock_divider);
 	if (ret < 0)
 		dev_err(dev, "error: SSP re-initialization failed\n");
 
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index d190fe0..f5baf3c 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -549,6 +549,23 @@
 	memif->substream = substream;
 
 	snd_soc_set_runtime_hwparams(substream, &mtk_afe_hardware);
+
+	/*
+	 * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
+	 * smaller than period_size due to AFE's internal buffer.
+	 * This easily leads to overrun when avail_min is period_size.
+	 * One more period can hold the possible unread buffer.
+	 */
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		ret = snd_pcm_hw_constraint_minmax(runtime,
+						   SNDRV_PCM_HW_PARAM_PERIODS,
+						   3,
+						   mtk_afe_hardware.periods_max);
+		if (ret < 0) {
+			dev_err(afe->dev, "hw_constraint_minmax failed\n");
+			return ret;
+		}
+	}
 	ret = snd_pcm_hw_constraint_integer(runtime,
 					    SNDRV_PCM_HW_PARAM_PERIODS);
 	if (ret < 0)
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 39cea80..f2bf866 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -1,7 +1,6 @@
 config SND_PXA2XX_SOC
 	tristate "SoC Audio for the Intel PXA2xx chip"
 	depends on ARCH_PXA
-	select SND_ARM
 	select SND_PXA2XX_LIB
 	help
 	  Say Y or M if you want to add support for codecs attached to
@@ -25,7 +24,6 @@
 config SND_PXA2XX_SOC_AC97
 	tristate
 	select AC97_BUS
-	select SND_ARM
 	select SND_PXA2XX_LIB_AC97
 	select SND_SOC_AC97_BUS
 
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 1f60546..9e4b04e 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -49,7 +49,7 @@
 	.reset	= pxa2xx_ac97_cold_reset,
 };
 
-static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12;
+static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11;
 static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
 	.addr		= __PREG(PCDR),
 	.addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
@@ -57,7 +57,7 @@
 	.filter_data	= &pxa2xx_ac97_pcm_stereo_in_req,
 };
 
-static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11;
+static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12;
 static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
 	.addr		= __PREG(PCDR),
 	.addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index f4bf21a..ff8bda4 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3501,7 +3501,7 @@
 
 	default:
 		WARN(1, "Unknown event %d\n", event);
-		return -EINVAL;
+		ret = -EINVAL;
 	}
 
 out:
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 362c69a..53dd085 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -101,6 +101,15 @@
 			SNDRV_PCM_FMTBIT_S32_LE | \
 			SNDRV_PCM_FMTBIT_U32_LE | \
 			SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
+/*
+ * The dummy CODEC is only meant to be used in situations where there is no
+ * actual hardware.
+ *
+ * If there is actual hardware, even if it does not have a control bus,
+ * the hardware will still have constraints like supported sample rates
+ * which should be modelled, and the data flow graph should also be
+ * modelled using DAPM.
+ */
 static struct snd_soc_dai_driver dummy_dai = {
 	.name = "snd-soc-dummy-dai",
 	.playback = {
diff --git a/sound/soc/spear/Kconfig b/sound/soc/spear/Kconfig
index 0a53053..4fb9141 100644
--- a/sound/soc/spear/Kconfig
+++ b/sound/soc/spear/Kconfig
@@ -1,6 +1,6 @@
 config SND_SPEAR_SOC
 	tristate
-	select SND_DMAENGINE_PCM
+	select SND_SOC_GENERIC_DMAENGINE_PCM
 
 config SND_SPEAR_SPDIF_OUT
 	tristate
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index f6eefe1..843f037 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -989,8 +989,8 @@
 	if (!info)
 		return -ENOMEM;
 
-	of_property_read_u32(pnode, "version", &player->ver);
-	if (player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
+	if (of_property_read_u32(pnode, "version", &player->ver) ||
+	    player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
 		dev_err(dev, "Unknown uniperipheral version ");
 		return -EINVAL;
 	}
@@ -998,10 +998,16 @@
 	if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
 		info->underflow_enabled = 1;
 
-	of_property_read_u32(pnode, "uniperiph-id", &info->id);
+	if (of_property_read_u32(pnode, "uniperiph-id", &info->id)) {
+		dev_err(dev, "uniperipheral id not defined");
+		return -EINVAL;
+	}
 
 	/* Read the device mode property */
-	of_property_read_string(pnode, "mode", &mode);
+	if (of_property_read_string(pnode, "mode", &mode)) {
+		dev_err(dev, "uniperipheral mode not defined");
+		return -EINVAL;
+	}
 
 	if (strcasecmp(mode, "hdmi") == 0)
 		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI;
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index c502626..f791239 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -316,7 +316,11 @@
 	if (!info)
 		return -ENOMEM;
 
-	of_property_read_u32(node, "version", &reader->ver);
+	if (of_property_read_u32(node, "version", &reader->ver) ||
+	    reader->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
+		dev_err(&pdev->dev, "Unknown uniperipheral version ");
+		return -EINVAL;
+	}
 
 	/* Save the info structure */
 	reader->info = info;
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 2975632..c8fe6d1 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -41,6 +41,7 @@
 	libelf-getphdrnum		\
 	libelf-mmap			\
 	libnuma				\
+	numa_num_possible_cpus		\
 	libperl				\
 	libpython			\
 	libpython-version		\
@@ -51,7 +52,8 @@
 	timerfd				\
 	libdw-dwarf-unwind		\
 	zlib				\
-	lzma
+	lzma				\
+	get_cpuid
 
 FEATURE_DISPLAY ?=			\
 	dwarf				\
@@ -61,13 +63,15 @@
 	libbfd				\
 	libelf				\
 	libnuma				\
+	numa_num_possible_cpus		\
 	libperl				\
 	libpython			\
 	libslang			\
 	libunwind			\
 	libdw-dwarf-unwind		\
 	zlib				\
-	lzma
+	lzma				\
+	get_cpuid
 
 # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
 # If in the future we need per-feature checks/flags for features not
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 74ca420..e43a297 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -19,6 +19,7 @@
 	test-libelf-getphdrnum.bin	\
 	test-libelf-mmap.bin		\
 	test-libnuma.bin		\
+	test-numa_num_possible_cpus.bin	\
 	test-libperl.bin		\
 	test-libpython.bin		\
 	test-libpython-version.bin	\
@@ -34,7 +35,8 @@
 	test-compile-x32.bin		\
 	test-zlib.bin			\
 	test-lzma.bin			\
-	test-bpf.bin
+	test-bpf.bin			\
+	test-get_cpuid.bin
 
 CC := $(CROSS_COMPILE)gcc -MD
 PKG_CONFIG := $(CROSS_COMPILE)pkg-config
@@ -87,6 +89,9 @@
 test-libnuma.bin:
 	$(BUILD) -lnuma
 
+test-numa_num_possible_cpus.bin:
+	$(BUILD) -lnuma
+
 test-libunwind.bin:
 	$(BUILD) -lelf
 
@@ -162,6 +167,9 @@
 test-lzma.bin:
 	$(BUILD) -llzma
 
+test-get_cpuid.bin:
+	$(BUILD)
+
 test-bpf.bin:
 	$(BUILD)
 
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 84689a6..33cf6f2 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -77,6 +77,10 @@
 # include "test-libnuma.c"
 #undef main
 
+#define main main_test_numa_num_possible_cpus
+# include "test-numa_num_possible_cpus.c"
+#undef main
+
 #define main main_test_timerfd
 # include "test-timerfd.c"
 #undef main
@@ -117,6 +121,10 @@
 # include "test-lzma.c"
 #undef main
 
+#define main main_test_get_cpuid
+# include "test-get_cpuid.c"
+#undef main
+
 int main(int argc, char *argv[])
 {
 	main_test_libpython();
@@ -136,6 +144,7 @@
 	main_test_libbfd();
 	main_test_backtrace();
 	main_test_libnuma();
+	main_test_numa_num_possible_cpus();
 	main_test_timerfd();
 	main_test_stackprotector_all();
 	main_test_libdw_dwarf_unwind();
@@ -143,6 +152,7 @@
 	main_test_zlib();
 	main_test_pthread_attr_setaffinity_np();
 	main_test_lzma();
+	main_test_get_cpuid();
 
 	return 0;
 }
diff --git a/tools/build/feature/test-get_cpuid.c b/tools/build/feature/test-get_cpuid.c
new file mode 100644
index 0000000..d7a2c40
--- /dev/null
+++ b/tools/build/feature/test-get_cpuid.c
@@ -0,0 +1,7 @@
+#include <cpuid.h>
+
+int main(void)
+{
+	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
+	return __get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
+}
diff --git a/tools/build/feature/test-numa_num_possible_cpus.c b/tools/build/feature/test-numa_num_possible_cpus.c
new file mode 100644
index 0000000..2606e94
--- /dev/null
+++ b/tools/build/feature/test-numa_num_possible_cpus.c
@@ -0,0 +1,6 @@
+#include <numa.h>
+
+int main(void)
+{
+	return numa_num_possible_cpus();
+}
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 4d88593..cf42b09 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -3795,7 +3795,7 @@
 	struct format_field *field;
 	struct printk_map *printk;
 	long long val, fval;
-	unsigned long addr;
+	unsigned long long addr;
 	char *str;
 	unsigned char *hex;
 	int print;
@@ -3828,13 +3828,30 @@
 		 */
 		if (!(field->flags & FIELD_IS_ARRAY) &&
 		    field->size == pevent->long_size) {
-			addr = *(unsigned long *)(data + field->offset);
+
+			/* Handle heterogeneous recording and processing
+			 * architectures
+			 *
+			 * CASE I:
+			 * Traces recorded on 32-bit devices (32-bit
+			 * addressing) and processed on 64-bit devices:
+			 * In this case, only 32 bits should be read.
+			 *
+			 * CASE II:
+			 * Traces recorded on 64 bit devices and processed
+			 * on 32-bit devices:
+			 * In this case, 64 bits must be read.
+			 */
+			addr = (pevent->long_size == 8) ?
+				*(unsigned long long *)(data + field->offset) :
+				(unsigned long long)*(unsigned int *)(data + field->offset);
+
 			/* Check if it matches a print format */
 			printk = find_printk(pevent, addr);
 			if (printk)
 				trace_seq_puts(s, printk->printk);
 			else
-				trace_seq_printf(s, "%lx", addr);
+				trace_seq_printf(s, "%llx", addr);
 			break;
 		}
 		str = malloc(len + 1);
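(The hunk above sizes the read from the trace buffer using the long size recorded by the target machine, not the host. A user-space sketch of the same idea, assuming only the standard C library; read_recorded_long is an illustrative name, not a pevent API.)

#include <stdint.h>
#include <string.h>

/* Read a pointer-sized field using the *recorded* long size, so 32-bit
 * traces decode correctly on 64-bit hosts and vice versa. */
static uint64_t read_recorded_long(const void *field, int recorded_long_size)
{
	if (recorded_long_size == 8) {
		uint64_t v;

		memcpy(&v, field, sizeof(v));
		return v;
	} else {
		uint32_t v;

		memcpy(&v, field, sizeof(v));
		return v;
	}
}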
diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c
index 2cd3d4c..5b32413 100644
--- a/tools/net/bpf_jit_disasm.c
+++ b/tools/net/bpf_jit_disasm.c
@@ -156,8 +156,8 @@
 	free(buff);
 }
 
-static int get_last_jit_image(char *haystack, size_t hlen,
-			      uint8_t *image, size_t ilen)
+static unsigned int get_last_jit_image(char *haystack, size_t hlen,
+				       uint8_t *image, size_t ilen)
 {
 	char *ptr, *pptr, *tmp;
 	off_t off = 0;
diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt
index 4a0501d..c94c9de 100644
--- a/tools/perf/Documentation/intel-pt.txt
+++ b/tools/perf/Documentation/intel-pt.txt
@@ -364,21 +364,6 @@
 
 		CYC packets are not requested by default.
 
-no_force_psb	This is a driver option and is not in the IA32_RTIT_CTL MSR.
-
-		It stops the driver resetting the byte count to zero whenever
-		enabling the trace (for example on context switches) which in
-		turn results in no PSB being forced.  However some processors
-		will produce a PSB anyway.
-
-		In any case, there is still a PSB when the trace is enabled for
-		the first time.
-
-		no_force_psb can be used to slightly decrease the trace size but
-		may make it harder for the decoder to recover from errors.
-
-		no_force_psb is not selected by default.
-
 
 new snapshot option
 -------------------
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index eb51325..284a76e 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -768,8 +768,8 @@
 	if (!evsel->attr.sample_id_all) {
 		sample->cpu = 0;
 		sample->time = 0;
-		sample->tid = event->comm.tid;
-		sample->pid = event->comm.pid;
+		sample->tid = event->fork.tid;
+		sample->pid = event->fork.pid;
 	}
 	print_sample_start(sample, thread, evsel);
 	perf_event__fprintf(event, stdout);
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 827557f..38a0853 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -573,9 +573,14 @@
     msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev);
     NO_LIBNUMA := 1
   else
-    CFLAGS += -DHAVE_LIBNUMA_SUPPORT
-    EXTLIBS += -lnuma
-    $(call detected,CONFIG_NUMA)
+    ifeq ($(feature-numa_num_possible_cpus), 0)
+      msg := $(warning Old numa library found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev >= 2.0.8);
+      NO_LIBNUMA := 1
+    else
+      CFLAGS += -DHAVE_LIBNUMA_SUPPORT
+      EXTLIBS += -lnuma
+      $(call detected,CONFIG_NUMA)
+    endif
   endif
 endif
 
@@ -621,8 +626,13 @@
 endif
 
 ifndef NO_AUXTRACE
-  $(call detected,CONFIG_AUXTRACE)
-  CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+  ifeq ($(feature-get_cpuid), 0)
+    msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
+    NO_AUXTRACE := 1
+  else
+    $(call detected,CONFIG_AUXTRACE)
+    CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+  endif
 endif
 
 # Among the variables below, these:
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 1aa21c9..5b83f56 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -34,6 +34,8 @@
 		.disabled = 1,
 		.freq = 1,
 	};
+	struct cpu_map *cpus;
+	struct thread_map *threads;
 
 	attr.sample_freq = 500;
 
@@ -50,14 +52,19 @@
 	}
 	perf_evlist__add(evlist, evsel);
 
-	evlist->cpus = cpu_map__dummy_new();
-	evlist->threads = thread_map__new_by_tid(getpid());
-	if (!evlist->cpus || !evlist->threads) {
+	cpus = cpu_map__dummy_new();
+	threads = thread_map__new_by_tid(getpid());
+	if (!cpus || !threads) {
 		err = -ENOMEM;
 		pr_debug("Not enough memory to create thread/cpu maps\n");
-		goto out_delete_evlist;
+		goto out_free_maps;
 	}
 
+	perf_evlist__set_maps(evlist, cpus, threads);
+
+	cpus	= NULL;
+	threads = NULL;
+
 	if (perf_evlist__open(evlist)) {
 		const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";
 
@@ -107,6 +114,9 @@
 		err = -1;
 	}
 
+out_free_maps:
+	cpu_map__put(cpus);
+	thread_map__put(threads);
 out_delete_evlist:
 	perf_evlist__delete(evlist);
 	return err;
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 3a8fedef..add1638 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -43,6 +43,8 @@
 	};
 	const char *argv[] = { "true", NULL };
 	char sbuf[STRERR_BUFSIZE];
+	struct cpu_map *cpus;
+	struct thread_map *threads;
 
 	signal(SIGCHLD, sig_handler);
 
@@ -58,14 +60,19 @@
 	 * perf_evlist__prepare_workload we'll fill in the only thread
 	 * we're monitoring, the one forked there.
 	 */
-	evlist->cpus = cpu_map__dummy_new();
-	evlist->threads = thread_map__new_by_tid(-1);
-	if (!evlist->cpus || !evlist->threads) {
+	cpus = cpu_map__dummy_new();
+	threads = thread_map__new_by_tid(-1);
+	if (!cpus || !threads) {
 		err = -ENOMEM;
 		pr_debug("Not enough memory to create thread/cpu maps\n");
-		goto out_delete_evlist;
+		goto out_free_maps;
 	}
 
+	perf_evlist__set_maps(evlist, cpus, threads);
+
+	cpus	= NULL;
+	threads = NULL;
+
 	err = perf_evlist__prepare_workload(evlist, &target, argv, false,
 					    workload_exec_failed_signal);
 	if (err < 0) {
@@ -114,6 +121,9 @@
 		err = -1;
 	}
 
+out_free_maps:
+	cpu_map__put(cpus);
+	thread_map__put(threads);
 out_delete_evlist:
 	perf_evlist__delete(evlist);
 	return err;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index cf86f2d..c04c60d 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1968,7 +1968,8 @@
 					  &options[nr_options], dso);
 		nr_options += add_map_opt(browser, &actions[nr_options],
 					  &options[nr_options],
-					  browser->selection->map);
+					  browser->selection ?
+						browser->selection->map : NULL);
 
 		/* perf script support */
 		if (browser->he_selection) {
@@ -1976,6 +1977,15 @@
 						     &actions[nr_options],
 						     &options[nr_options],
 						     thread, NULL);
+			/*
+			 * Note that browser->selection != NULL
+			 * when browser->he_selection is not NULL,
+			 * so we don't need to check browser->selection
+			 * before fetching browser->selection->sym like what
+			 * we do before fetching browser->selection->map.
+			 *
+			 * See hist_browser__show_entry.
+			 */
 			nr_options += add_script_opt(browser,
 						     &actions[nr_options],
 						     &options[nr_options],
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index d51a520..c8fc8a2 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -124,6 +124,33 @@
 	free(evlist);
 }
 
+static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
+					  struct perf_evsel *evsel)
+{
+	/*
+	 * We already have cpus for evsel (via PMU sysfs) so
+	 * keep it, if there's no target cpu list defined.
+	 */
+	if (!evsel->own_cpus || evlist->has_user_cpus) {
+		cpu_map__put(evsel->cpus);
+		evsel->cpus = cpu_map__get(evlist->cpus);
+	} else if (evsel->cpus != evsel->own_cpus) {
+		cpu_map__put(evsel->cpus);
+		evsel->cpus = cpu_map__get(evsel->own_cpus);
+	}
+
+	thread_map__put(evsel->threads);
+	evsel->threads = thread_map__get(evlist->threads);
+}
+
+static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel;
+
+	evlist__for_each(evlist, evsel)
+		__perf_evlist__propagate_maps(evlist, evsel);
+}
+
 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 {
 	entry->evlist = evlist;
@@ -133,18 +160,19 @@
 
 	if (!evlist->nr_entries++)
 		perf_evlist__set_id_pos(evlist);
+
+	__perf_evlist__propagate_maps(evlist, entry);
 }
 
 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
-				   struct list_head *list,
-				   int nr_entries)
+				   struct list_head *list)
 {
-	bool set_id_pos = !evlist->nr_entries;
+	struct perf_evsel *evsel, *temp;
 
-	list_splice_tail(list, &evlist->entries);
-	evlist->nr_entries += nr_entries;
-	if (set_id_pos)
-		perf_evlist__set_id_pos(evlist);
+	__evlist__for_each_safe(list, temp, evsel) {
+		list_del_init(&evsel->node);
+		perf_evlist__add(evlist, evsel);
+	}
 }
 
 void __perf_evlist__set_leader(struct list_head *list)
@@ -210,7 +238,7 @@
 		list_add_tail(&evsel->node, &head);
 	}
 
-	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
+	perf_evlist__splice_list_tail(evlist, &head);
 
 	return 0;
 
@@ -1103,71 +1131,56 @@
 	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
 }
 
-static int perf_evlist__propagate_maps(struct perf_evlist *evlist,
-				       bool has_user_cpus)
-{
-	struct perf_evsel *evsel;
-
-	evlist__for_each(evlist, evsel) {
-		/*
-		 * We already have cpus for evsel (via PMU sysfs) so
-		 * keep it, if there's no target cpu list defined.
-		 */
-		if (evsel->cpus && has_user_cpus)
-			cpu_map__put(evsel->cpus);
-
-		if (!evsel->cpus || has_user_cpus)
-			evsel->cpus = cpu_map__get(evlist->cpus);
-
-		evsel->threads = thread_map__get(evlist->threads);
-
-		if ((evlist->cpus && !evsel->cpus) ||
-		    (evlist->threads && !evsel->threads))
-			return -ENOMEM;
-	}
-
-	return 0;
-}
-
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
 {
-	evlist->threads = thread_map__new_str(target->pid, target->tid,
-					      target->uid);
+	struct cpu_map *cpus;
+	struct thread_map *threads;
 
-	if (evlist->threads == NULL)
+	threads = thread_map__new_str(target->pid, target->tid, target->uid);
+
+	if (!threads)
 		return -1;
 
 	if (target__uses_dummy_map(target))
-		evlist->cpus = cpu_map__dummy_new();
+		cpus = cpu_map__dummy_new();
 	else
-		evlist->cpus = cpu_map__new(target->cpu_list);
+		cpus = cpu_map__new(target->cpu_list);
 
-	if (evlist->cpus == NULL)
+	if (!cpus)
 		goto out_delete_threads;
 
-	return perf_evlist__propagate_maps(evlist, !!target->cpu_list);
+	evlist->has_user_cpus = !!target->cpu_list;
+
+	perf_evlist__set_maps(evlist, cpus, threads);
+
+	return 0;
 
 out_delete_threads:
-	thread_map__put(evlist->threads);
-	evlist->threads = NULL;
+	thread_map__put(threads);
 	return -1;
 }
 
-int perf_evlist__set_maps(struct perf_evlist *evlist,
-			  struct cpu_map *cpus,
-			  struct thread_map *threads)
+void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
+			   struct thread_map *threads)
 {
-	if (evlist->cpus)
+	/*
+	 * Allow for the possibility that one or another of the maps isn't being
+	 * changed i.e. don't put it.  Note we are assuming the maps that are
+	 * being applied are brand new and evlist is taking ownership of the
+	 * original reference count of 1.  If that is not the case it is up to
+	 * the caller to increase the reference count.
+	 */
+	if (cpus != evlist->cpus) {
 		cpu_map__put(evlist->cpus);
+		evlist->cpus = cpus;
+	}
 
-	evlist->cpus = cpus;
-
-	if (evlist->threads)
+	if (threads != evlist->threads) {
 		thread_map__put(evlist->threads);
+		evlist->threads = threads;
+	}
 
-	evlist->threads = threads;
-
-	return perf_evlist__propagate_maps(evlist, false);
+	perf_evlist__propagate_maps(evlist);
 }
 
 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
@@ -1387,6 +1400,8 @@
 
 static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
 {
+	struct cpu_map	  *cpus;
+	struct thread_map *threads;
 	int err = -ENOMEM;
 
 	/*
@@ -1398,20 +1413,19 @@
 	 * error, and we may not want to do that fallback to a
 	 * default cpu identity map :-\
 	 */
-	evlist->cpus = cpu_map__new(NULL);
-	if (evlist->cpus == NULL)
+	cpus = cpu_map__new(NULL);
+	if (!cpus)
 		goto out;
 
-	evlist->threads = thread_map__new_dummy();
-	if (evlist->threads == NULL)
-		goto out_free_cpus;
+	threads = thread_map__new_dummy();
+	if (!threads)
+		goto out_put;
 
-	err = 0;
+	perf_evlist__set_maps(evlist, cpus, threads);
 out:
 	return err;
-out_free_cpus:
-	cpu_map__put(evlist->cpus);
-	evlist->cpus = NULL;
+out_put:
+	cpu_map__put(cpus);
 	goto out;
 }
 
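(After this rework perf_evlist__set_maps() takes ownership of the references passed in, which is why the updated sw-clock and task-exit tests NULL their local pointers and only put the maps on the error path. A sketch of the resulting calling pattern, assuming the perf-internal evlist/cpu_map/thread_map headers; setup_maps is an illustrative name.)

static int setup_maps(struct perf_evlist *evlist)
{
	struct cpu_map *cpus = cpu_map__dummy_new();		/* refcount 1 */
	struct thread_map *threads = thread_map__new_by_tid(getpid());

	if (!cpus || !threads) {
		cpu_map__put(cpus);		/* both puts accept NULL */
		thread_map__put(threads);
		return -ENOMEM;
	}

	/* evlist now owns both references; don't put them again here. */
	perf_evlist__set_maps(evlist, cpus, threads);
	return 0;
}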
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index b39a619..115d8b5 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -42,6 +42,7 @@
 	int		 nr_mmaps;
 	bool		 overwrite;
 	bool		 enabled;
+	bool		 has_user_cpus;
 	size_t		 mmap_len;
 	int		 id_pos;
 	int		 is_pos;
@@ -155,9 +156,8 @@
 void perf_evlist__set_selected(struct perf_evlist *evlist,
 			       struct perf_evsel *evsel);
 
-int perf_evlist__set_maps(struct perf_evlist *evlist,
-			  struct cpu_map *cpus,
-			  struct thread_map *threads);
+void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
+			   struct thread_map *threads);
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);
 
@@ -179,8 +179,7 @@
 bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
 
 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
-				   struct list_head *list,
-				   int nr_entries);
+				   struct list_head *list);
 
 static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
 {
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c53f791..5410483 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1033,6 +1033,7 @@
 	perf_evsel__free_config_terms(evsel);
 	close_cgroup(evsel->cgrp);
 	cpu_map__put(evsel->cpus);
+	cpu_map__put(evsel->own_cpus);
 	thread_map__put(evsel->threads);
 	zfree(&evsel->group_name);
 	zfree(&evsel->name);
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 298e6bb..ef8925f 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -98,6 +98,7 @@
 	struct cgroup_sel	*cgrp;
 	void			*handler;
 	struct cpu_map		*cpus;
+	struct cpu_map		*own_cpus;
 	struct thread_map	*threads;
 	unsigned int		sample_size;
 	int			id_pos;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 4181454..fce6634 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1438,7 +1438,7 @@
 	if (ph->needs_swap)
 		nr = bswap_32(nr);
 
-	ph->env.nr_cpus_online = nr;
+	ph->env.nr_cpus_avail = nr;
 
 	ret = readn(fd, &nr, sizeof(nr));
 	if (ret != sizeof(nr))
@@ -1447,7 +1447,7 @@
 	if (ph->needs_swap)
 		nr = bswap_32(nr);
 
-	ph->env.nr_cpus_avail = nr;
+	ph->env.nr_cpus_online = nr;
 	return 0;
 }
 
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index ea76862..eb0e7f8 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -623,7 +623,7 @@
 	if (err)
 		return err;
 	if (event->header.type == PERF_RECORD_EXIT) {
-		err = intel_bts_process_tid_exit(bts, event->comm.tid);
+		err = intel_bts_process_tid_exit(bts, event->fork.tid);
 		if (err)
 			return err;
 	}
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index bb41c20..535d86f 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1494,7 +1494,7 @@
 	if (pt->timeless_decoding) {
 		if (event->header.type == PERF_RECORD_EXIT) {
 			err = intel_pt_process_timeless_queues(pt,
-							       event->comm.tid,
+							       event->fork.tid,
 							       sample->time);
 		}
 	} else if (timestamp) {
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index d826e6f..21ed6ee 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -287,8 +287,8 @@
 	if (!evsel)
 		return NULL;
 
-	if (cpus)
-		evsel->cpus = cpu_map__get(cpus);
+	evsel->cpus     = cpu_map__get(cpus);
+	evsel->own_cpus = cpu_map__get(cpus);
 
 	if (name)
 		evsel->name = strdup(name);
@@ -1140,10 +1140,9 @@
 	ret = parse_events__scanner(str, &data, PE_START_EVENTS);
 	perf_pmu__parse_cleanup();
 	if (!ret) {
-		int entries = data.idx - evlist->nr_entries;
 		struct perf_evsel *last;
 
-		perf_evlist__splice_list_tail(evlist, &data.list, entries);
+		perf_evlist__splice_list_tail(evlist, &data.list);
 		evlist->nr_groups += data.nr_groups;
 		last = perf_evlist__last(evlist);
 		last->cmdline_group_boundary = true;
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 591905a..9cd7081 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -255,7 +255,7 @@
 	list_add_tail(&term->list, head);
 
 	ALLOC_LIST(list);
-	ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head));
+	ABORT_ON(parse_events_add_pmu(data, list, "cpu", head));
 	parse_events__free_terms(head);
 	$$ = list;
 }
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index eb5f18b..c6f9af7 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -270,12 +270,13 @@
 	int ret = 0;
 
 	if (module) {
-		list_for_each_entry(dso, &host_machine->dsos.head, node) {
-			if (!dso->kernel)
-				continue;
-			if (strncmp(dso->short_name + 1, module,
-				    dso->short_name_len - 2) == 0)
-				goto found;
+		char module_name[128];
+
+		snprintf(module_name, sizeof(module_name), "[%s]", module);
+		map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name);
+		if (map) {
+			dso = map->dso;
+			goto found;
 		}
 		pr_debug("Failed to find module %s.\n", module);
 		return -ENOENT;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 8a4537e..fc3f7c9 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1580,7 +1580,10 @@
 	file_offset = page_offset;
 	head = data_offset - page_offset;
 
-	if (data_size && (data_offset + data_size < file_size))
+	if (data_size == 0)
+		goto out;
+
+	if (data_offset + data_size < file_size)
 		file_size = data_offset + data_size;
 
 	ui_progress__init(&prog, file_size, "Processing events...");
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 415c359..2d065d0 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -196,7 +196,8 @@
 		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
 }
 
-static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
+static int check_per_pkg(struct perf_evsel *counter,
+			 struct perf_counts_values *vals, int cpu, bool *skip)
 {
 	unsigned long *mask = counter->per_pkg_mask;
 	struct cpu_map *cpus = perf_evsel__cpus(counter);
@@ -218,6 +219,17 @@
 		counter->per_pkg_mask = mask;
 	}
 
+	/*
+	 * we do not consider an event that has not run as a good
+	 * instance to mark a package as used (skip=1). Otherwise
+	 * we may run into a situation where the first CPU in a package
+	 * is not running anything, yet the second is, and this function
+	 * would mark the package as used after the first CPU and would
+	 * not read the values from the second CPU.
+	 */
+	if (!(vals->run && vals->ena))
+		return 0;
+
 	s = cpu_map__get_socket(cpus, cpu);
 	if (s < 0)
 		return -1;
@@ -235,7 +247,7 @@
 	static struct perf_counts_values zero;
 	bool skip = false;
 
-	if (check_per_pkg(evsel, cpu, &skip)) {
+	if (check_per_pkg(evsel, count, cpu, &skip)) {
 		pr_err("failed to read per-pkg counter\n");
 		return -1;
 	}
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 53bb5f5..475d88d 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -38,7 +38,7 @@
 #endif
 
 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
-int elf_getphdrnum(Elf *elf, size_t *dst)
+static int elf_getphdrnum(Elf *elf, size_t *dst)
 {
 	GElf_Ehdr gehdr;
 	GElf_Ehdr *ehdr;
@@ -1271,8 +1271,6 @@
 static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
 		       bool temp)
 {
-	GElf_Ehdr *ehdr;
-
 	kcore->elfclass = elfclass;
 
 	if (temp)
@@ -1289,9 +1287,7 @@
 	if (!gelf_newehdr(kcore->elf, elfclass))
 		goto out_end;
 
-	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
-	if (!ehdr)
-		goto out_end;
+	memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
 
 	return 0;
 
@@ -1348,23 +1344,18 @@
 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
 			   u64 addr, u64 len)
 {
-	GElf_Phdr gphdr;
-	GElf_Phdr *phdr;
+	GElf_Phdr phdr = {
+		.p_type		= PT_LOAD,
+		.p_flags	= PF_R | PF_W | PF_X,
+		.p_offset	= offset,
+		.p_vaddr	= addr,
+		.p_paddr	= 0,
+		.p_filesz	= len,
+		.p_memsz	= len,
+		.p_align	= page_size,
+	};
 
-	phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
-	if (!phdr)
-		return -1;
-
-	phdr->p_type	= PT_LOAD;
-	phdr->p_flags	= PF_R | PF_W | PF_X;
-	phdr->p_offset	= offset;
-	phdr->p_vaddr	= addr;
-	phdr->p_paddr	= 0;
-	phdr->p_filesz	= len;
-	phdr->p_memsz	= len;
-	phdr->p_align	= page_size;
-
-	if (!gelf_update_phdr(kcore->elf, idx, phdr))
+	if (!gelf_update_phdr(kcore->elf, idx, &phdr))
 		return -1;
 
 	return 0;
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 7acafb3..c2cd9bf2 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -709,7 +709,7 @@
 
 	dir = opendir(procfs__mountpoint());
 	if (!dir)
-		return -1;
+		return false;
 
 	/* Walk through the directory. */
 	while (ret && (d = readdir(dir)) != NULL) {
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 9655cb4..bde0ef1 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -71,8 +71,11 @@
 unsigned int extra_msr_offset64;
 unsigned int extra_delta_offset32;
 unsigned int extra_delta_offset64;
+unsigned int aperf_mperf_multiplier = 1;
 int do_smi;
 double bclk;
+double base_hz;
+double tsc_tweak = 1.0;
 unsigned int show_pkg;
 unsigned int show_core;
 unsigned int show_cpu;
@@ -502,7 +505,7 @@
 	/* %Busy */
 	if (has_aperf) {
 		if (!skip_c0)
-			outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
+			outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak);
 		else
 			outp += sprintf(outp, "********");
 	}
@@ -510,7 +513,7 @@
 	/* Bzy_MHz */
 	if (has_aperf)
 		outp += sprintf(outp, "%8.0f",
-			1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
+			1.0 * t->tsc * tsc_tweak / units * t->aperf / t->mperf / interval_float);
 
 	/* TSC_MHz */
 	outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
@@ -984,6 +987,8 @@
 			return -3;
 		if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
 			return -4;
+		t->aperf = t->aperf * aperf_mperf_multiplier;
+		t->mperf = t->mperf * aperf_mperf_multiplier;
 	}
 
 	if (do_smi) {
@@ -1149,6 +1154,19 @@
 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 
+
+static void
+calculate_tsc_tweak()
+{
+	unsigned long long msr;
+	unsigned int base_ratio;
+
+	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+	base_ratio = (msr >> 8) & 0xFF;
+	base_hz = base_ratio * bclk * 1000000;
+	tsc_tweak = base_hz / tsc_hz;
+}
+
 static void
 dump_nhm_platform_info(void)
 {
@@ -1926,8 +1944,6 @@
 
 	switch (model) {
 	case 0x3A:	/* IVB */
-	case 0x3E:	/* IVB Xeon */
-
 	case 0x3C:	/* HSW */
 	case 0x3F:	/* HSX */
 	case 0x45:	/* HSW */
@@ -2543,6 +2559,13 @@
 	return 0;
 }
 
+unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model)
+{
+	if (is_knl(family, model))
+		return 1024;
+	return 1;
+}
+
 #define SLM_BCLK_FREQS 5
 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
 
@@ -2744,6 +2767,9 @@
 		}
 	}
 
+	if (has_aperf)
+		aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model);
+
 	do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
 	do_snb_cstates = has_snb_msrs(family, model);
 	do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
@@ -2762,6 +2788,9 @@
 	if (debug)
 		dump_cstate_pstate_config_info();
 
+	if (has_skl_msrs(family, model))
+		calculate_tsc_tweak();
+
 	return;
 }
 
@@ -3090,7 +3119,7 @@
 }
 
 void print_version() {
-	fprintf(stderr, "turbostat version 4.7 17-June, 2015"
+	fprintf(stderr, "turbostat version 4.8 26-Sep, 2015"
 		" - Len Brown <lenb@kernel.org>\n");
 }
 
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 0501511..cfe1213 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -6,6 +6,7 @@
 TARGETS += ftrace
 TARGETS += futex
 TARGETS += kcmp
+TARGETS += membarrier
 TARGETS += memfd
 TARGETS += memory-hotplug
 TARGETS += mount
@@ -15,12 +16,12 @@
 TARGETS += ptrace
 TARGETS += seccomp
 TARGETS += size
+TARGETS += static_keys
 TARGETS += sysctl
 ifneq (1, $(quicktest))
 TARGETS += timers
 endif
 TARGETS += user
-TARGETS += jumplabel
 TARGETS += vm
 TARGETS += x86
 TARGETS += zram
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index 6b76bfd..4e400eb 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -1,6 +1,6 @@
 CFLAGS = -Wall
 BINARIES = execveat
-DEPS = execveat.symlink execveat.denatured script
+DEPS = execveat.symlink execveat.denatured script subdir
 all: $(BINARIES) $(DEPS)
 
 subdir:
@@ -22,7 +22,5 @@
 
 include ../lib.mk
 
-override EMIT_TESTS := echo "mkdir -p subdir; (./execveat && echo \"selftests: execveat [PASS]\") || echo \"selftests: execveat [FAIL]\""
-
 clean:
 	rm -rf $(BINARIES) $(DEPS) subdir.moved execveat.moved xxxxx*
diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile
index 0acbeca..4e6ed13 100644
--- a/tools/testing/selftests/ftrace/Makefile
+++ b/tools/testing/selftests/ftrace/Makefile
@@ -1,7 +1,7 @@
 all:
 
 TEST_PROGS := ftracetest
-TEST_DIRS := test.d/
+TEST_DIRS := test.d
 
 include ../lib.mk
 
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 97f1c67..50a93f5 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -12,13 +12,10 @@
 	$(RUN_TESTS)
 
 define INSTALL_RULE
-	@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then			\
-		mkdir -p $(INSTALL_PATH);								\
-		for TEST_DIR in $(TEST_DIRS); do							\
-			cp -r $$TEST_DIR $(INSTALL_PATH);						\
-		done;											\
-		echo "install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)";	\
-		install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES);		\
+	@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then					\
+		mkdir -p ${INSTALL_PATH};										\
+		echo "rsync -a $(TEST_DIRS) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/";	\
+		rsync -a $(TEST_DIRS) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/;		\
 	fi
 endef
 
diff --git a/tools/testing/selftests/membarrier/.gitignore b/tools/testing/selftests/membarrier/.gitignore
new file mode 100644
index 0000000..020c44f4
--- /dev/null
+++ b/tools/testing/selftests/membarrier/.gitignore
@@ -0,0 +1 @@
+membarrier_test
diff --git a/tools/testing/selftests/membarrier/Makefile b/tools/testing/selftests/membarrier/Makefile
new file mode 100644
index 0000000..a1a9708
--- /dev/null
+++ b/tools/testing/selftests/membarrier/Makefile
@@ -0,0 +1,10 @@
+CFLAGS += -g -I../../../../usr/include/
+
+TEST_PROGS := membarrier_test
+
+all: $(TEST_PROGS)
+
+include ../lib.mk
+
+clean:
+	$(RM) $(TEST_PROGS)
diff --git a/tools/testing/selftests/membarrier/membarrier_test.c b/tools/testing/selftests/membarrier/membarrier_test.c
new file mode 100644
index 0000000..535f0fe
--- /dev/null
+++ b/tools/testing/selftests/membarrier/membarrier_test.c
@@ -0,0 +1,118 @@
+#define _GNU_SOURCE
+#include <linux/membarrier.h>
+#include <syscall.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+
+#include "../kselftest.h"
+
+enum test_membarrier_status {
+	TEST_MEMBARRIER_PASS = 0,
+	TEST_MEMBARRIER_FAIL,
+	TEST_MEMBARRIER_SKIP,
+};
+
+static int sys_membarrier(int cmd, int flags)
+{
+	return syscall(__NR_membarrier, cmd, flags);
+}
+
+static enum test_membarrier_status test_membarrier_cmd_fail(void)
+{
+	int cmd = -1, flags = 0;
+
+	if (sys_membarrier(cmd, flags) != -1) {
+		printf("membarrier: Wrong command should have failed but succeeded.\n");
+		return TEST_MEMBARRIER_FAIL;
+	}
+	return TEST_MEMBARRIER_PASS;
+}
+
+static enum test_membarrier_status test_membarrier_flags_fail(void)
+{
+	int cmd = MEMBARRIER_CMD_QUERY, flags = 1;
+
+	if (sys_membarrier(cmd, flags) != -1) {
+		printf("membarrier: Wrong flags should have failed but succeeded.\n");
+		return TEST_MEMBARRIER_FAIL;
+	}
+	return TEST_MEMBARRIER_PASS;
+}
+
+static enum test_membarrier_status test_membarrier_success(void)
+{
+	int cmd = MEMBARRIER_CMD_SHARED, flags = 0;
+
+	if (sys_membarrier(cmd, flags) != 0) {
+		printf("membarrier: Executing MEMBARRIER_CMD_SHARED failed. %s.\n",
+				strerror(errno));
+		return TEST_MEMBARRIER_FAIL;
+	}
+
+	printf("membarrier: MEMBARRIER_CMD_SHARED success.\n");
+	return TEST_MEMBARRIER_PASS;
+}
+
+static enum test_membarrier_status test_membarrier(void)
+{
+	enum test_membarrier_status status;
+
+	status = test_membarrier_cmd_fail();
+	if (status)
+		return status;
+	status = test_membarrier_flags_fail();
+	if (status)
+		return status;
+	status = test_membarrier_success();
+	if (status)
+		return status;
+	return TEST_MEMBARRIER_PASS;
+}
+
+static enum test_membarrier_status test_membarrier_query(void)
+{
+	int flags = 0, ret;
+
+	printf("membarrier MEMBARRIER_CMD_QUERY ");
+	ret = sys_membarrier(MEMBARRIER_CMD_QUERY, flags);
+	if (ret < 0) {
+		printf("failed. %s.\n", strerror(errno));
+		switch (errno) {
+		case ENOSYS:
+			/*
+			 * It is valid to build a kernel with
+			 * CONFIG_MEMBARRIER=n. However, this skips the tests.
+			 */
+			return TEST_MEMBARRIER_SKIP;
+		case EINVAL:
+		default:
+			return TEST_MEMBARRIER_FAIL;
+		}
+	}
+	if (!(ret & MEMBARRIER_CMD_SHARED)) {
+		printf("command MEMBARRIER_CMD_SHARED is not supported.\n");
+		return TEST_MEMBARRIER_FAIL;
+	}
+	printf("syscall available.\n");
+	return TEST_MEMBARRIER_PASS;
+}
+
+int main(int argc, char **argv)
+{
+	switch (test_membarrier_query()) {
+	case TEST_MEMBARRIER_FAIL:
+		return ksft_exit_fail();
+	case TEST_MEMBARRIER_SKIP:
+		return ksft_exit_skip();
+	}
+	switch (test_membarrier()) {
+	case TEST_MEMBARRIER_FAIL:
+		return ksft_exit_fail();
+	case TEST_MEMBARRIER_SKIP:
+		return ksft_exit_skip();
+	}
+
+	printf("membarrier: tests done!\n");
+	return ksft_exit_pass();
+}
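
Outside of the kselftest harness, the same query-then-use pattern from membarrier_test.c looks roughly like this; a minimal sketch assuming a kernel built with CONFIG_MEMBARRIER=y and headers that provide __NR_membarrier:

#define _GNU_SOURCE
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* QUERY returns a bitmask of supported commands, or -1 with ENOSYS. */
	long supported = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);

	if (supported < 0 || !(supported & MEMBARRIER_CMD_SHARED)) {
		printf("membarrier: not usable on this kernel\n");
		return 0;
	}

	/* Issue a memory barrier on all running threads of the system. */
	if (syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0) == 0)
		printf("membarrier: MEMBARRIER_CMD_SHARED completed\n");
	return 0;
}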
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
index 0e3b41e..eebac29 100644
--- a/tools/testing/selftests/mqueue/Makefile
+++ b/tools/testing/selftests/mqueue/Makefile
@@ -1,8 +1,8 @@
-CFLAGS = -O2
+CFLAGS += -O2
+LDLIBS = -lrt -lpthread -lpopt
+TEST_PROGS := mq_open_tests mq_perf_tests
 
-all:
-	$(CC) $(CFLAGS) mq_open_tests.c -o mq_open_tests -lrt
-	$(CC) $(CFLAGS) -o mq_perf_tests mq_perf_tests.c -lrt -lpthread -lpopt
+all: $(TEST_PROGS)
 
 include ../lib.mk
 
@@ -11,8 +11,6 @@
 	@./mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]"
 endef
 
-TEST_PROGS := mq_open_tests mq_perf_tests
-
 override define EMIT_TESTS
 	echo "./mq_open_tests /test1 || echo \"selftests: mq_open_tests [FAIL]\""
 	echo "./mq_perf_tests || echo \"selftests: mq_perf_tests [FAIL]\""
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index a004b4c..770f47a 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1210,6 +1210,10 @@
 # define ARCH_REGS	struct pt_regs
 # define SYSCALL_NUM	gpr[0]
 # define SYSCALL_RET	gpr[3]
+#elif defined(__s390__)
+# define ARCH_REGS     s390_regs
+# define SYSCALL_NUM   gprs[2]
+# define SYSCALL_RET   gprs[2]
 #else
 # error "Do not know how to find your architecture's registers and syscalls"
 #endif
@@ -1243,7 +1247,8 @@
 	ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
 	EXPECT_EQ(0, ret);
 
-#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || defined(__powerpc__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \
+    defined(__powerpc__) || defined(__s390__)
 	{
 		regs.SYSCALL_NUM = syscall;
 	}
@@ -1281,17 +1286,21 @@
 	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
 	EXPECT_EQ(0, ret);
 
+	/* Validate and take action on expected syscalls. */
 	switch (msg) {
 	case 0x1002:
 		/* change getpid to getppid. */
+		EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
 		change_syscall(_metadata, tracee, __NR_getppid);
 		break;
 	case 0x1003:
 		/* skip gettid. */
+		EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
 		change_syscall(_metadata, tracee, -1);
 		break;
 	case 0x1004:
 		/* do nothing (allow getppid) */
+		EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
 		break;
 	default:
 		EXPECT_EQ(0, msg) {
@@ -1409,6 +1418,8 @@
 #  define __NR_seccomp 277
 # elif defined(__powerpc__)
 #  define __NR_seccomp 358
+# elif defined(__s390__)
+#  define __NR_seccomp 348
 # else
 #  warning "seccomp syscall number unknown for this architecture"
 #  define __NR_seccomp 0xffff
@@ -1453,6 +1464,9 @@
 
 	/* Reject insane operation. */
 	ret = seccomp(-1, 0, &prog);
+	ASSERT_NE(ENOSYS, errno) {
+		TH_LOG("Kernel does not support seccomp syscall!");
+	}
 	EXPECT_EQ(EINVAL, errno) {
 		TH_LOG("Did not reject crazy op value!");
 	}
@@ -1501,6 +1515,9 @@
 	}
 
 	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
+	ASSERT_NE(ENOSYS, errno) {
+		TH_LOG("Kernel does not support seccomp syscall!");
+	}
 	EXPECT_EQ(0, ret) {
 		TH_LOG("Could not install filter!");
 	}
@@ -1535,6 +1552,9 @@
 
 	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
 		      &prog);
+	ASSERT_NE(ENOSYS, errno) {
+		TH_LOG("Kernel does not support seccomp syscall!");
+	}
 	EXPECT_EQ(0, ret) {
 		TH_LOG("Could not install initial filter with TSYNC!");
 	}
@@ -1694,6 +1714,9 @@
 
 	/* Check prctl failure detection by requesting sib 0 diverge. */
 	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
+	ASSERT_NE(ENOSYS, errno) {
+		TH_LOG("Kernel does not support seccomp syscall!");
+	}
 	ASSERT_EQ(0, ret) {
 		TH_LOG("setting filter failed");
 	}
@@ -1731,6 +1754,9 @@
 	}
 
 	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
+	ASSERT_NE(ENOSYS, errno) {
+		TH_LOG("Kernel does not support seccomp syscall!");
+	}
 	ASSERT_EQ(0, ret) {
 		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
 	}
@@ -1805,6 +1831,9 @@
 
 	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
 		      &self->apply_prog);
+	ASSERT_NE(ENOSYS, errno) {
+		TH_LOG("Kernel does not support seccomp syscall!");
+	}
 	ASSERT_EQ(0, ret) {
 		TH_LOG("Could install filter on all threads!");
 	}
@@ -1833,6 +1862,9 @@
 	}
 
 	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
+	ASSERT_NE(ENOSYS, errno) {
+		TH_LOG("Kernel does not support seccomp syscall!");
+	}
 	ASSERT_EQ(0, ret) {
 		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
 	}
@@ -1890,6 +1922,9 @@
 	}
 
 	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
+	ASSERT_NE(ENOSYS, errno) {
+		TH_LOG("Kernel does not support seccomp syscall!");
+	}
 	ASSERT_EQ(0, ret) {
 		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
 	}
diff --git a/tools/testing/selftests/seccomp/test_harness.h b/tools/testing/selftests/seccomp/test_harness.h
index 977a6af..fb28416 100644
--- a/tools/testing/selftests/seccomp/test_harness.h
+++ b/tools/testing/selftests/seccomp/test_harness.h
@@ -370,11 +370,8 @@
 	__typeof__(_expected) __exp = (_expected); \
 	__typeof__(_seen) __seen = (_seen); \
 	if (!(__exp _t __seen)) { \
-		unsigned long long __exp_print = 0; \
-		unsigned long long __seen_print = 0; \
-		/* Avoid casting complaints the scariest way we can. */ \
-		memcpy(&__exp_print, &__exp, sizeof(__exp)); \
-		memcpy(&__seen_print, &__seen, sizeof(__seen)); \
+		unsigned long long __exp_print = (unsigned long long)__exp; \
+		unsigned long long __seen_print = (unsigned long long)__seen; \
 		__TH_LOG("Expected %s (%llu) %s %s (%llu)", \
 			 #_expected, __exp_print, #_t, \
 			 #_seen, __seen_print); \
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index d36fab7..3c53cac 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,6 +1,6 @@
 # Makefile for vm selftests
 
-CFLAGS = -Wall
+CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
 BINARIES = compaction_test
 BINARIES += hugepage-mmap
 BINARIES += hugepage-shm
@@ -12,8 +12,11 @@
 all: $(BINARIES)
 %: %.c
 	$(CC) $(CFLAGS) -o $@ $^ -lrt
-userfaultfd: userfaultfd.c
-	$(CC) $(CFLAGS) -O2 -o $@ $^ -lpthread
+userfaultfd: userfaultfd.c ../../../../usr/include/linux/kernel.h
+	$(CC) $(CFLAGS) -O2 -o $@ $< -lpthread
+
+../../../../usr/include/linux/kernel.h:
+	make -C ../../../.. headers_install
 
 TEST_PROGS := run_vmtests
 TEST_FILES := $(BINARIES)
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 2c7cca6..d77ed41 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -64,17 +64,9 @@
 #include <sys/syscall.h>
 #include <sys/ioctl.h>
 #include <pthread.h>
-#include "../../../../include/uapi/linux/userfaultfd.h"
+#include <linux/userfaultfd.h>
 
-#ifdef __x86_64__
-#define __NR_userfaultfd 323
-#elif defined(__i386__)
-#define __NR_userfaultfd 374
-#elif defined(__powewrpc__)
-#define __NR_userfaultfd 364
-#else
-#error "missing __NR_userfaultfd definition"
-#endif
+#ifdef __NR_userfaultfd
 
 static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
 
@@ -430,7 +422,7 @@
 	struct uffdio_register uffdio_register;
 	struct uffdio_api uffdio_api;
 	unsigned long cpu;
-	int uffd_flags;
+	int uffd_flags, err;
 	unsigned long userfaults[nr_cpus];
 
 	if (posix_memalign(&area, page_size, nr_pages * page_size)) {
@@ -473,6 +465,14 @@
 		*area_mutex(area_src, nr) = (pthread_mutex_t)
 			PTHREAD_MUTEX_INITIALIZER;
 		count_verify[nr] = *area_count(area_src, nr) = 1;
+		/*
+		 * In the transition from 255 to 256, powerpc can
+		 * read out of order in my_bcmp and see both bytes as
+		 * zero, so always leave a non-zero placeholder right
+		 * after the count to keep my_bcmp from triggering
+		 * false positives.
+		 */
+		*(area_count(area_src, nr) + 1) = 1;
 	}
 
 	pipefd = malloc(sizeof(int) * nr_cpus * 2);
@@ -499,6 +499,7 @@
 	pthread_attr_init(&attr);
 	pthread_attr_setstacksize(&attr, 16*1024*1024);
 
+	err = 0;
 	while (bounces--) {
 		unsigned long expected_ioctls;
 
@@ -579,20 +580,13 @@
 		/* verification */
 		if (bounces & BOUNCE_VERIFY) {
 			for (nr = 0; nr < nr_pages; nr++) {
-				if (my_bcmp(area_dst,
-					    area_dst + nr * page_size,
-					    sizeof(pthread_mutex_t))) {
-					fprintf(stderr,
-						"error mutex 2 %lu\n",
-						nr);
-					bounces = 0;
-				}
 				if (*area_count(area_dst, nr) != count_verify[nr]) {
 					fprintf(stderr,
 						"error area_count %Lu %Lu %lu\n",
 						*area_count(area_src, nr),
 						count_verify[nr],
 						nr);
+					err = 1;
 					bounces = 0;
 				}
 			}
@@ -609,7 +603,7 @@
 		printf("\n");
 	}
 
-	return 0;
+	return err;
 }
 
 int main(int argc, char **argv)
@@ -618,8 +612,8 @@
 		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
 	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 	page_size = sysconf(_SC_PAGE_SIZE);
-	if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) >
-	    page_size)
+	if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) * 2
+	    > page_size)
 		fprintf(stderr, "Impossible to run this test\n"), exit(2);
 	nr_pages_per_cpu = atol(argv[1]) * 1024*1024 / page_size /
 		nr_cpus;
@@ -637,3 +631,15 @@
 	       nr_pages, nr_pages_per_cpu);
 	return userfaultfd_stress();
 }
+
+#else /* __NR_userfaultfd */
+
+#warning "missing __NR_userfaultfd definition"
+
+int main(void)
+{
+	printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
+	return 0;
+}
+
+#endif /* __NR_userfaultfd */
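
The torn read described in the userfaultfd comment above (the 255 to 256 transition) can be illustrated on its own; a minimal little-endian sketch, independent of the test's area_count() layout:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long long before = 255, after = 256;
	unsigned char b[8], a[8], torn[8];

	memcpy(b, &before, sizeof(b));
	memcpy(a, &after, sizeof(a));

	/*
	 * A reader that sees byte 0 of the new value but byte 1 of the
	 * old value observes 0x00 0x00 ..., which is indistinguishable
	 * from a page that was never copied at all.
	 */
	memcpy(torn, b, sizeof(torn));
	torn[0] = a[0];
	printf("torn count looks like zero: %d\n", torn[0] == 0 && torn[1] == 0);

	/*
	 * Keeping a constant non-zero byte right after the count (as the
	 * test now does) guarantees a copied page never compares as
	 * all-zero, whatever the counter value is.
	 */
	return 0;
}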
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
index 9a43a59..421c607 100644
--- a/tools/testing/selftests/x86/entry_from_vm86.c
+++ b/tools/testing/selftests/x86/entry_from_vm86.c
@@ -116,8 +116,9 @@
 	v86->regs.eip = eip;
 	ret = vm86(VM86_ENTER, v86);
 
-	if (ret == -1 && errno == ENOSYS) {
-		printf("[SKIP]\tvm86 not supported\n");
+	if (ret == -1 && (errno == ENOSYS || errno == EPERM)) {
+		printf("[SKIP]\tvm86 %s\n",
+		       errno == ENOSYS ? "not supported" : "not allowed");
 		return false;
 	}
 
diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh
index 20de9a7..683a292 100755
--- a/tools/testing/selftests/zram/zram.sh
+++ b/tools/testing/selftests/zram/zram.sh
@@ -1,15 +1,7 @@
 #!/bin/bash
 TCID="zram.sh"
 
-check_prereqs()
-{
-	local msg="skip all tests:"
-
-	if [ $UID != 0 ]; then
-		echo $msg must be run as root >&2
-		exit 0
-	fi
-}
+. ./zram_lib.sh
 
 run_zram () {
 echo "--------------------"
diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
index 424e68e..f6a9c73 100755
--- a/tools/testing/selftests/zram/zram_lib.sh
+++ b/tools/testing/selftests/zram/zram_lib.sh
@@ -23,8 +23,9 @@
 check_prereqs()
 {
 	local msg="skip all tests:"
+	local uid=$(id -u)
 
-	if [ $UID != 0 ]; then
+	if [ $uid -ne 0 ]; then
 		echo $msg must be run as root >&2
 		exit 0
 	fi
diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile
index 505ad51..39c89a5 100644
--- a/tools/virtio/Makefile
+++ b/tools/virtio/Makefile
@@ -6,7 +6,7 @@
 CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE
 vpath %.c ../../drivers/virtio ../../drivers/vhost
 mod:
-	${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test
+	${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
 .PHONY: all test mod clean
 clean:
 	${RM} *.o vringh_test virtio_test vhost_test/*.o vhost_test/.*.cmd \
diff --git a/tools/virtio/asm/barrier.h b/tools/virtio/asm/barrier.h
index aff61e1..26b7926 100644
--- a/tools/virtio/asm/barrier.h
+++ b/tools/virtio/asm/barrier.h
@@ -3,6 +3,8 @@
 #define mb() __sync_synchronize()
 
 #define smp_mb()	mb()
+# define dma_rmb()	barrier()
+# define dma_wmb()	barrier()
 # define smp_rmb()	barrier()
 # define smp_wmb()	barrier()
 /* Weak barriers should be used. If not - it's a bug */
diff --git a/tools/virtio/linux/export.h b/tools/virtio/linux/export.h
new file mode 100644
index 0000000..416875e
--- /dev/null
+++ b/tools/virtio/linux/export.h
@@ -0,0 +1,3 @@
+#define EXPORT_SYMBOL_GPL(sym) extern typeof(sym) sym
+#define EXPORT_SYMBOL(sym) extern typeof(sym) sym
+
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 1e8ce69..0a3da64 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -22,6 +22,7 @@
 
 typedef unsigned long long dma_addr_t;
 typedef size_t __kernel_size_t;
+typedef unsigned int __wsum;
 
 struct page {
 	unsigned long long dummy;
@@ -47,6 +48,13 @@
 		return __kmalloc_fake;
 	return malloc(s);
 }
+static inline void *kzalloc(size_t s, gfp_t gfp)
+{
+	void *p = kmalloc(s, gfp);
+
+	memset(p, 0, s);
+	return p;
+}
 
 static inline void kfree(void *p)
 {
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 76e38d2..48c6e1a 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -200,6 +200,14 @@
 	timer->irq = irq;
 
 	/*
+	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
+	 * and to 0 for ARMv7.  We provide an implementation that always
+	 * resets the timer to be disabled and unmasked and is compliant with
+	 * the ARMv7 architecture.
+	 */
+	timer->cntv_ctl = 0;
+
+	/*
 	 * Tell the VGIC that the virtual interrupt is tied to a
 	 * physical interrupt. We do that once per VCPU.
 	 */
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index afbf925..7dd5d62 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -288,7 +288,7 @@
 
 	vgic->vctrl_base = NULL;
 	vgic->type = VGIC_V3;
-	vgic->max_gic_vcpus = KVM_MAX_VCPUS;
+	vgic->max_gic_vcpus = VGIC_V3_MAX_CPUS;
 
 	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
 		 vcpu_res.start, vgic->maint_irq);
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 9eb489a..6bd1c9b 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1144,26 +1144,11 @@
 		struct irq_phys_map *map;
 		map = vgic_irq_map_search(vcpu, irq);
 
-		/*
-		 * If we have a mapping, and the virtual interrupt is
-		 * being injected, then we must set the state to
-		 * active in the physical world. Otherwise the
-		 * physical interrupt will fire and the guest will
-		 * exit before processing the virtual interrupt.
-		 */
 		if (map) {
-			int ret;
-
-			BUG_ON(!map->active);
 			vlr.hwirq = map->phys_irq;
 			vlr.state |= LR_HW;
 			vlr.state &= ~LR_EOI_INT;
 
-			ret = irq_set_irqchip_state(map->irq,
-						    IRQCHIP_STATE_ACTIVE,
-						    true);
-			WARN_ON(ret);
-
 			/*
 			 * Make sure we're not going to sample this
 			 * again, as a HW-backed interrupt cannot be
@@ -1255,7 +1240,7 @@
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	unsigned long *pa_percpu, *pa_shared;
-	int i, vcpu_id;
+	int i, vcpu_id, lr, ret;
 	int overflow = 0;
 	int nr_shared = vgic_nr_shared_irqs(dist);
 
@@ -1310,6 +1295,31 @@
 		 */
 		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
 	}
+
+	for (lr = 0; lr < vgic->nr_lr; lr++) {
+		struct vgic_lr vlr;
+
+		if (!test_bit(lr, vgic_cpu->lr_used))
+			continue;
+
+		vlr = vgic_get_lr(vcpu, lr);
+
+		/*
+		 * If we have a mapping, and the virtual interrupt is
+		 * presented to the guest (as pending or active), then we must
+		 * set the state to active in the physical world. See
+		 * Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt.
+		 */
+		if (vlr.state & LR_HW) {
+			struct irq_phys_map *map;
+			map = vgic_irq_map_search(vcpu, vlr.irq);
+
+			ret = irq_set_irqchip_state(map->irq,
+						    IRQCHIP_STATE_ACTIVE,
+						    true);
+			WARN_ON(ret);
+		}
+	}
 }
 
 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 5cbf190..6bca74c 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -24,9 +24,9 @@
 int kvm_coalesced_mmio_init(struct kvm *kvm);
 void kvm_coalesced_mmio_free(struct kvm *kvm);
 int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
-																				struct kvm_coalesced_mmio_zone *zone);
+					struct kvm_coalesced_mmio_zone *zone);
 int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
-																				struct kvm_coalesced_mmio_zone *zone);
+					struct kvm_coalesced_mmio_zone *zone);
 
 #else
 
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 9ff4193..79db453 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -771,40 +771,14 @@
 	return KVM_MMIO_BUS;
 }
 
-static int
-kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
+				enum kvm_bus bus_idx,
+				struct kvm_ioeventfd *args)
 {
-	enum kvm_bus              bus_idx;
-	struct _ioeventfd        *p;
-	struct eventfd_ctx       *eventfd;
-	int                       ret;
 
-	bus_idx = ioeventfd_bus_from_flags(args->flags);
-	/* must be natural-word sized, or 0 to ignore length */
-	switch (args->len) {
-	case 0:
-	case 1:
-	case 2:
-	case 4:
-	case 8:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* check for range overflow */
-	if (args->addr + args->len < args->addr)
-		return -EINVAL;
-
-	/* check for extra flags that we don't understand */
-	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
-		return -EINVAL;
-
-	/* ioeventfd with no length can't be combined with DATAMATCH */
-	if (!args->len &&
-	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
-			   KVM_IOEVENTFD_FLAG_DATAMATCH))
-		return -EINVAL;
+	struct eventfd_ctx *eventfd;
+	struct _ioeventfd *p;
+	int ret;
 
 	eventfd = eventfd_ctx_fdget(args->fd);
 	if (IS_ERR(eventfd))
@@ -843,16 +817,6 @@
 	if (ret < 0)
 		goto unlock_fail;
 
-	/* When length is ignored, MMIO is also put on a separate bus, for
-	 * faster lookups.
-	 */
-	if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
-		ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
-					      p->addr, 0, &p->dev);
-		if (ret < 0)
-			goto register_fail;
-	}
-
 	kvm->buses[bus_idx]->ioeventfd_count++;
 	list_add_tail(&p->list, &kvm->ioeventfds);
 
@@ -860,8 +824,6 @@
 
 	return 0;
 
-register_fail:
-	kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
 unlock_fail:
 	mutex_unlock(&kvm->slots_lock);
 
@@ -873,14 +835,13 @@
 }
 
 static int
-kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
+			   struct kvm_ioeventfd *args)
 {
-	enum kvm_bus              bus_idx;
 	struct _ioeventfd        *p, *tmp;
 	struct eventfd_ctx       *eventfd;
 	int                       ret = -ENOENT;
 
-	bus_idx = ioeventfd_bus_from_flags(args->flags);
 	eventfd = eventfd_ctx_fdget(args->fd);
 	if (IS_ERR(eventfd))
 		return PTR_ERR(eventfd);
@@ -901,10 +862,6 @@
 			continue;
 
 		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
-		if (!p->length) {
-			kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
-						  &p->dev);
-		}
 		kvm->buses[bus_idx]->ioeventfd_count--;
 		ioeventfd_release(p);
 		ret = 0;
@@ -918,6 +875,71 @@
 	return ret;
 }
 
+static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+{
+	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
+	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
+
+	if (!args->len && bus_idx == KVM_MMIO_BUS)
+		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
+
+	return ret;
+}
+
+static int
+kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+{
+	enum kvm_bus              bus_idx;
+	int ret;
+
+	bus_idx = ioeventfd_bus_from_flags(args->flags);
+	/* must be natural-word sized, or 0 to ignore length */
+	switch (args->len) {
+	case 0:
+	case 1:
+	case 2:
+	case 4:
+	case 8:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* check for range overflow */
+	if (args->addr + args->len < args->addr)
+		return -EINVAL;
+
+	/* check for extra flags that we don't understand */
+	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
+		return -EINVAL;
+
+	/* ioeventfd with no length can't be combined with DATAMATCH */
+	if (!args->len &&
+	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
+			   KVM_IOEVENTFD_FLAG_DATAMATCH))
+		return -EINVAL;
+
+	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
+	if (ret)
+		goto fail;
+
+	/* When length is ignored, MMIO is also put on a separate bus, for
+	 * faster lookups.
+	 */
+	if (!args->len && bus_idx == KVM_MMIO_BUS) {
+		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
+		if (ret < 0)
+			goto fast_fail;
+	}
+
+	return 0;
+
+fast_fail:
+	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
+fail:
+	return ret;
+}
+
 int
 kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a25a731..8db1d93 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -66,8 +66,8 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
-/* halt polling only reduces halt latency by 5-7 us, 500us is enough */
-static unsigned int halt_poll_ns = 500000;
+/* Architectures should define their poll value according to the halt latency */
+static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
 module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
 
 /* Default doubles per-vcpu halt_poll_ns. */
@@ -2004,6 +2004,7 @@
 	if (vcpu->halt_poll_ns) {
 		ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
 
+		++vcpu->stat.halt_attempted_poll;
 		do {
 			/*
 			 * This sets KVM_REQ_UNHALT if an interrupt
@@ -2043,7 +2044,8 @@
 		else if (vcpu->halt_poll_ns < halt_poll_ns &&
 			block_ns < halt_poll_ns)
 			grow_halt_poll_ns(vcpu);
-	}
+	} else
+		vcpu->halt_poll_ns = 0;
 
 	trace_kvm_vcpu_wakeup(block_ns, waited);
 }
@@ -3156,10 +3158,25 @@
 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
 				 const struct kvm_io_range *r2)
 {
-	if (r1->addr < r2->addr)
+	gpa_t addr1 = r1->addr;
+	gpa_t addr2 = r2->addr;
+
+	if (addr1 < addr2)
 		return -1;
-	if (r1->addr + r1->len > r2->addr + r2->len)
+
+	/* If r2->len == 0, match the exact address.  If r2->len != 0,
+	 * accept any overlapping write.  Any order is acceptable for
+	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
+	 * we process all of them.
+	 */
+	if (r2->len) {
+		addr1 += r1->len;
+		addr2 += r2->len;
+	}
+
+	if (addr1 > addr2)
 		return 1;
+
 	return 0;
 }